comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Or better still, pick the eligible deployment with the oldest `lastTriggered` timestamp?
private void deployRefreshedCertificates() { var now = clock.instant(); var jobsTriggered = new AtomicInteger(0); curator.readAllEndpointCertificateMetadata().forEach((applicationId, endpointCertificateMetadata) -> endpointCertificateMetadata.lastRefreshed().ifPresent(lastRefreshTime -> { Instant refreshTime = Instant.ofEpochSecond(lastRefreshTime); if (now.isAfter(refreshTime.plus(4, ChronoUnit.DAYS))) { controller().applications().getInstance(applicationId) .ifPresent(instance -> instance.productionDeployments().forEach((zone, deployment) -> { if (deployment.at().isBefore(refreshTime) && jobsTriggered.compareAndSet(0, 1)) { JobType job = JobType.deploymentTo(zone); deploymentTrigger.reTrigger(applicationId, job, "re-triggered by EndpointCertificateMaintainer"); log.info("Re-triggering deployment job " + job.jobName() + " for instance " + applicationId.serializedForm() + " to roll out refreshed endpoint certificate"); } })); } })); }
if (deployment.at().isBefore(refreshTime) && jobsTriggered.compareAndSet(0, 1)) {
private void deployRefreshedCertificates() { var now = clock.instant(); var eligibleJobs = new ArrayList<EligibleJob>(); curator.readAllEndpointCertificateMetadata().forEach((applicationId, endpointCertificateMetadata) -> endpointCertificateMetadata.lastRefreshed().ifPresent(lastRefreshTime -> { Instant refreshTime = Instant.ofEpochSecond(lastRefreshTime); if (now.isAfter(refreshTime.plus(4, ChronoUnit.DAYS))) { controller().applications().getInstance(applicationId) .ifPresent(instance -> instance.productionDeployments().forEach((zone, deployment) -> { if (deployment.at().isBefore(refreshTime)) { JobType job = JobType.deploymentTo(zone); eligibleJobs.add(new EligibleJob(deployment, applicationId, job)); } })); } })); eligibleJobs.stream() .min(oldestFirst) .ifPresent(e -> { deploymentTrigger.reTrigger(e.applicationId, e.job, "re-triggered by EndpointCertificateMaintainer"); log.info("Re-triggering deployment job " + e.job.jobName() + " for instance " + e.applicationId.serializedForm() + " to roll out refreshed endpoint certificate"); }); }
class EndpointCertificateMaintainer extends ControllerMaintainer { private static final Logger log = Logger.getLogger(EndpointCertificateMaintainer.class.getName()); private final DeploymentTrigger deploymentTrigger; private final Clock clock; private final CuratorDb curator; private final SecretStore secretStore; private final EndpointCertificateProvider endpointCertificateProvider; @Inject public EndpointCertificateMaintainer(Controller controller, Duration interval) { super(controller, interval); this.deploymentTrigger = controller.applications().deploymentTrigger(); this.clock = controller.clock(); this.secretStore = controller.secretStore(); this.curator = controller().curator(); this.endpointCertificateProvider = controller.serviceRegistry().endpointCertificateProvider(); } @Override protected double maintain() { try { deployRefreshedCertificates(); updateRefreshedCertificates(); deleteUnusedCertificates(); deleteOrReportUnmanagedCertificates(); } catch (Exception e) { log.log(Level.SEVERE, "Exception caught while maintaining endpoint certificates", e); return 0.0; } return 1.0; } private void updateRefreshedCertificates() { curator.readAllEndpointCertificateMetadata().forEach(((applicationId, endpointCertificateMetadata) -> { var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata); if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) { var refreshedCertificateMetadata = endpointCertificateMetadata .withVersion(latestAvailableVersion.getAsInt()) .withLastRefreshed(clock.instant().getEpochSecond()); try (Mutex lock = lock(applicationId)) { if (Optional.of(endpointCertificateMetadata).equals(curator.readEndpointCertificateMetadata(applicationId))) { curator.writeEndpointCertificateMetadata(applicationId, refreshedCertificateMetadata); } } } })); } /** * If it's been four days since the cert has been refreshed, re-trigger prod deployment jobs (one at a time). 
*/ private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) { try { var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName())); var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName())); return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max(); } catch (SecretNotFoundException s) { return OptionalInt.empty(); } } private void deleteUnusedCertificates() { var oneMonthAgo = clock.instant().minus(30, ChronoUnit.DAYS); curator.readAllEndpointCertificateMetadata().forEach((applicationId, storedMetaData) -> { var lastRequested = Instant.ofEpochSecond(storedMetaData.lastRequested()); if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(applicationId)) { try (Mutex lock = lock(applicationId)) { if (Optional.of(storedMetaData).equals(curator.readEndpointCertificateMetadata(applicationId))) { log.log(Level.INFO, "Cert for app " + applicationId.serializedForm() + " has not been requested in a month and app has no deployments, deleting from provider and ZK"); endpointCertificateProvider.deleteCertificate(applicationId, storedMetaData.rootRequestId()); curator.deleteEndpointCertificateMetadata(applicationId); } } } }); } private Mutex lock(ApplicationId applicationId) { return curator.lock(TenantAndApplicationId.from(applicationId)); } private boolean hasNoDeployments(ApplicationId applicationId) { return controller().applications().getInstance(applicationId) .map(Instance::deployments) .orElseGet(Map::of) .isEmpty(); } private void deleteOrReportUnmanagedCertificates() { List<EndpointCertificateRequestMetadata> endpointCertificateMetadata = endpointCertificateProvider.listCertificates(); Map<ApplicationId, EndpointCertificateMetadata> storedEndpointCertificateMetadata = curator.readAllEndpointCertificateMetadata(); List<String> leafRequestIds = 
storedEndpointCertificateMetadata.values().stream().flatMap(m -> m.leafRequestId().stream()).collect(Collectors.toList()); List<String> rootRequestIds = storedEndpointCertificateMetadata.values().stream().map(EndpointCertificateMetadata::rootRequestId).collect(Collectors.toList()); for (var providerCertificateMetadata : endpointCertificateMetadata) { if (!rootRequestIds.contains(providerCertificateMetadata.requestId()) && !leafRequestIds.contains(providerCertificateMetadata.requestId())) { EndpointCertificateDetails unknownCertDetails = endpointCertificateProvider.certificateDetails(providerCertificateMetadata.requestId()); boolean matchFound = false; for (Map.Entry<ApplicationId, EndpointCertificateMetadata> storedAppEntry : storedEndpointCertificateMetadata.entrySet()) { ApplicationId storedApp = storedAppEntry.getKey(); EndpointCertificateMetadata storedAppMetadata = storedAppEntry.getValue(); if (storedAppMetadata.certName().equals(unknownCertDetails.cert_key_keyname())) { matchFound = true; try (Mutex lock = lock(storedApp)) { if (Optional.of(storedAppMetadata).equals(curator.readEndpointCertificateMetadata(storedApp))) { log.log(Level.INFO, "Cert for app " + storedApp.serializedForm() + " has a new leafRequestId " + unknownCertDetails.request_id() + ", updating in ZK"); curator.writeEndpointCertificateMetadata(storedApp, storedAppMetadata.withLeafRequestId(Optional.of(unknownCertDetails.request_id()))); } break; } } } if (!matchFound) { if (Instant.parse(providerCertificateMetadata.createTime()).isBefore(Instant.now().minus(7, ChronoUnit.DAYS))) { log.log(Level.INFO, String.format("Deleting unmaintained certificate with request_id %s and SANs %s", providerCertificateMetadata.requestId(), providerCertificateMetadata.dnsNames().stream().map(d -> d.dnsName).collect(Collectors.joining(", ")))); endpointCertificateProvider.deleteCertificate(ApplicationId.fromSerializedForm("applicationid:is:unknown"), providerCertificateMetadata.requestId()); } } } } } }
class EndpointCertificateMaintainer extends ControllerMaintainer { private static final Logger log = Logger.getLogger(EndpointCertificateMaintainer.class.getName()); private final DeploymentTrigger deploymentTrigger; private final Clock clock; private final CuratorDb curator; private final SecretStore secretStore; private final EndpointCertificateProvider endpointCertificateProvider; final Comparator<EligibleJob> oldestFirst = Comparator.comparing(e -> e.deployment.at()); @Inject public EndpointCertificateMaintainer(Controller controller, Duration interval) { super(controller, interval); this.deploymentTrigger = controller.applications().deploymentTrigger(); this.clock = controller.clock(); this.secretStore = controller.secretStore(); this.curator = controller().curator(); this.endpointCertificateProvider = controller.serviceRegistry().endpointCertificateProvider(); } @Override protected double maintain() { try { deployRefreshedCertificates(); updateRefreshedCertificates(); deleteUnusedCertificates(); deleteOrReportUnmanagedCertificates(); } catch (Exception e) { log.log(Level.SEVERE, "Exception caught while maintaining endpoint certificates", e); return 0.0; } return 1.0; } private void updateRefreshedCertificates() { curator.readAllEndpointCertificateMetadata().forEach(((applicationId, endpointCertificateMetadata) -> { var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata); if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) { var refreshedCertificateMetadata = endpointCertificateMetadata .withVersion(latestAvailableVersion.getAsInt()) .withLastRefreshed(clock.instant().getEpochSecond()); try (Mutex lock = lock(applicationId)) { if (Optional.of(endpointCertificateMetadata).equals(curator.readEndpointCertificateMetadata(applicationId))) { curator.writeEndpointCertificateMetadata(applicationId, refreshedCertificateMetadata); } } } })); } record EligibleJob(Deployment 
deployment, ApplicationId applicationId, JobType job) {} /** * If it's been four days since the cert has been refreshed, re-trigger prod deployment jobs (one at a time). */ private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) { try { var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName())); var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName())); return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max(); } catch (SecretNotFoundException s) { return OptionalInt.empty(); } } private void deleteUnusedCertificates() { var oneMonthAgo = clock.instant().minus(30, ChronoUnit.DAYS); curator.readAllEndpointCertificateMetadata().forEach((applicationId, storedMetaData) -> { var lastRequested = Instant.ofEpochSecond(storedMetaData.lastRequested()); if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(applicationId)) { try (Mutex lock = lock(applicationId)) { if (Optional.of(storedMetaData).equals(curator.readEndpointCertificateMetadata(applicationId))) { log.log(Level.INFO, "Cert for app " + applicationId.serializedForm() + " has not been requested in a month and app has no deployments, deleting from provider and ZK"); endpointCertificateProvider.deleteCertificate(applicationId, storedMetaData.rootRequestId()); curator.deleteEndpointCertificateMetadata(applicationId); } } } }); } private Mutex lock(ApplicationId applicationId) { return curator.lock(TenantAndApplicationId.from(applicationId)); } private boolean hasNoDeployments(ApplicationId applicationId) { return controller().applications().getInstance(applicationId) .map(Instance::deployments) .orElseGet(Map::of) .isEmpty(); } private void deleteOrReportUnmanagedCertificates() { List<EndpointCertificateRequestMetadata> endpointCertificateMetadata = endpointCertificateProvider.listCertificates(); Map<ApplicationId, 
EndpointCertificateMetadata> storedEndpointCertificateMetadata = curator.readAllEndpointCertificateMetadata(); List<String> leafRequestIds = storedEndpointCertificateMetadata.values().stream().flatMap(m -> m.leafRequestId().stream()).toList(); List<String> rootRequestIds = storedEndpointCertificateMetadata.values().stream().map(EndpointCertificateMetadata::rootRequestId).toList(); for (var providerCertificateMetadata : endpointCertificateMetadata) { if (!rootRequestIds.contains(providerCertificateMetadata.requestId()) && !leafRequestIds.contains(providerCertificateMetadata.requestId())) { EndpointCertificateDetails unknownCertDetails = endpointCertificateProvider.certificateDetails(providerCertificateMetadata.requestId()); boolean matchFound = false; for (Map.Entry<ApplicationId, EndpointCertificateMetadata> storedAppEntry : storedEndpointCertificateMetadata.entrySet()) { ApplicationId storedApp = storedAppEntry.getKey(); EndpointCertificateMetadata storedAppMetadata = storedAppEntry.getValue(); if (storedAppMetadata.certName().equals(unknownCertDetails.cert_key_keyname())) { matchFound = true; try (Mutex lock = lock(storedApp)) { if (Optional.of(storedAppMetadata).equals(curator.readEndpointCertificateMetadata(storedApp))) { log.log(Level.INFO, "Cert for app " + storedApp.serializedForm() + " has a new leafRequestId " + unknownCertDetails.request_id() + ", updating in ZK"); curator.writeEndpointCertificateMetadata(storedApp, storedAppMetadata.withLeafRequestId(Optional.of(unknownCertDetails.request_id()))); } break; } } } if (!matchFound) { if (Instant.parse(providerCertificateMetadata.createTime()).isBefore(Instant.now().minus(7, ChronoUnit.DAYS))) { log.log(Level.INFO, String.format("Deleting unmaintained certificate with request_id %s and SANs %s", providerCertificateMetadata.requestId(), providerCertificateMetadata.dnsNames().stream().map(d -> d.dnsName).collect(Collectors.joining(", ")))); 
endpointCertificateProvider.deleteCertificate(ApplicationId.fromSerializedForm("applicationid:is:unknown"), providerCertificateMetadata.requestId()); } } } } } }
That makes sense
private void deployRefreshedCertificates() { var now = clock.instant(); var jobsTriggered = new AtomicInteger(0); curator.readAllEndpointCertificateMetadata().forEach((applicationId, endpointCertificateMetadata) -> endpointCertificateMetadata.lastRefreshed().ifPresent(lastRefreshTime -> { Instant refreshTime = Instant.ofEpochSecond(lastRefreshTime); if (now.isAfter(refreshTime.plus(4, ChronoUnit.DAYS))) { controller().applications().getInstance(applicationId) .ifPresent(instance -> instance.productionDeployments().forEach((zone, deployment) -> { if (deployment.at().isBefore(refreshTime) && jobsTriggered.compareAndSet(0, 1)) { JobType job = JobType.deploymentTo(zone); deploymentTrigger.reTrigger(applicationId, job, "re-triggered by EndpointCertificateMaintainer"); log.info("Re-triggering deployment job " + job.jobName() + " for instance " + applicationId.serializedForm() + " to roll out refreshed endpoint certificate"); } })); } })); }
if (deployment.at().isBefore(refreshTime) && jobsTriggered.compareAndSet(0, 1)) {
private void deployRefreshedCertificates() { var now = clock.instant(); var eligibleJobs = new ArrayList<EligibleJob>(); curator.readAllEndpointCertificateMetadata().forEach((applicationId, endpointCertificateMetadata) -> endpointCertificateMetadata.lastRefreshed().ifPresent(lastRefreshTime -> { Instant refreshTime = Instant.ofEpochSecond(lastRefreshTime); if (now.isAfter(refreshTime.plus(4, ChronoUnit.DAYS))) { controller().applications().getInstance(applicationId) .ifPresent(instance -> instance.productionDeployments().forEach((zone, deployment) -> { if (deployment.at().isBefore(refreshTime)) { JobType job = JobType.deploymentTo(zone); eligibleJobs.add(new EligibleJob(deployment, applicationId, job)); } })); } })); eligibleJobs.stream() .min(oldestFirst) .ifPresent(e -> { deploymentTrigger.reTrigger(e.applicationId, e.job, "re-triggered by EndpointCertificateMaintainer"); log.info("Re-triggering deployment job " + e.job.jobName() + " for instance " + e.applicationId.serializedForm() + " to roll out refreshed endpoint certificate"); }); }
class EndpointCertificateMaintainer extends ControllerMaintainer { private static final Logger log = Logger.getLogger(EndpointCertificateMaintainer.class.getName()); private final DeploymentTrigger deploymentTrigger; private final Clock clock; private final CuratorDb curator; private final SecretStore secretStore; private final EndpointCertificateProvider endpointCertificateProvider; @Inject public EndpointCertificateMaintainer(Controller controller, Duration interval) { super(controller, interval); this.deploymentTrigger = controller.applications().deploymentTrigger(); this.clock = controller.clock(); this.secretStore = controller.secretStore(); this.curator = controller().curator(); this.endpointCertificateProvider = controller.serviceRegistry().endpointCertificateProvider(); } @Override protected double maintain() { try { deployRefreshedCertificates(); updateRefreshedCertificates(); deleteUnusedCertificates(); deleteOrReportUnmanagedCertificates(); } catch (Exception e) { log.log(Level.SEVERE, "Exception caught while maintaining endpoint certificates", e); return 0.0; } return 1.0; } private void updateRefreshedCertificates() { curator.readAllEndpointCertificateMetadata().forEach(((applicationId, endpointCertificateMetadata) -> { var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata); if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) { var refreshedCertificateMetadata = endpointCertificateMetadata .withVersion(latestAvailableVersion.getAsInt()) .withLastRefreshed(clock.instant().getEpochSecond()); try (Mutex lock = lock(applicationId)) { if (Optional.of(endpointCertificateMetadata).equals(curator.readEndpointCertificateMetadata(applicationId))) { curator.writeEndpointCertificateMetadata(applicationId, refreshedCertificateMetadata); } } } })); } /** * If it's been four days since the cert has been refreshed, re-trigger prod deployment jobs (one at a time). 
*/ private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) { try { var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName())); var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName())); return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max(); } catch (SecretNotFoundException s) { return OptionalInt.empty(); } } private void deleteUnusedCertificates() { var oneMonthAgo = clock.instant().minus(30, ChronoUnit.DAYS); curator.readAllEndpointCertificateMetadata().forEach((applicationId, storedMetaData) -> { var lastRequested = Instant.ofEpochSecond(storedMetaData.lastRequested()); if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(applicationId)) { try (Mutex lock = lock(applicationId)) { if (Optional.of(storedMetaData).equals(curator.readEndpointCertificateMetadata(applicationId))) { log.log(Level.INFO, "Cert for app " + applicationId.serializedForm() + " has not been requested in a month and app has no deployments, deleting from provider and ZK"); endpointCertificateProvider.deleteCertificate(applicationId, storedMetaData.rootRequestId()); curator.deleteEndpointCertificateMetadata(applicationId); } } } }); } private Mutex lock(ApplicationId applicationId) { return curator.lock(TenantAndApplicationId.from(applicationId)); } private boolean hasNoDeployments(ApplicationId applicationId) { return controller().applications().getInstance(applicationId) .map(Instance::deployments) .orElseGet(Map::of) .isEmpty(); } private void deleteOrReportUnmanagedCertificates() { List<EndpointCertificateRequestMetadata> endpointCertificateMetadata = endpointCertificateProvider.listCertificates(); Map<ApplicationId, EndpointCertificateMetadata> storedEndpointCertificateMetadata = curator.readAllEndpointCertificateMetadata(); List<String> leafRequestIds = 
storedEndpointCertificateMetadata.values().stream().flatMap(m -> m.leafRequestId().stream()).collect(Collectors.toList()); List<String> rootRequestIds = storedEndpointCertificateMetadata.values().stream().map(EndpointCertificateMetadata::rootRequestId).collect(Collectors.toList()); for (var providerCertificateMetadata : endpointCertificateMetadata) { if (!rootRequestIds.contains(providerCertificateMetadata.requestId()) && !leafRequestIds.contains(providerCertificateMetadata.requestId())) { EndpointCertificateDetails unknownCertDetails = endpointCertificateProvider.certificateDetails(providerCertificateMetadata.requestId()); boolean matchFound = false; for (Map.Entry<ApplicationId, EndpointCertificateMetadata> storedAppEntry : storedEndpointCertificateMetadata.entrySet()) { ApplicationId storedApp = storedAppEntry.getKey(); EndpointCertificateMetadata storedAppMetadata = storedAppEntry.getValue(); if (storedAppMetadata.certName().equals(unknownCertDetails.cert_key_keyname())) { matchFound = true; try (Mutex lock = lock(storedApp)) { if (Optional.of(storedAppMetadata).equals(curator.readEndpointCertificateMetadata(storedApp))) { log.log(Level.INFO, "Cert for app " + storedApp.serializedForm() + " has a new leafRequestId " + unknownCertDetails.request_id() + ", updating in ZK"); curator.writeEndpointCertificateMetadata(storedApp, storedAppMetadata.withLeafRequestId(Optional.of(unknownCertDetails.request_id()))); } break; } } } if (!matchFound) { if (Instant.parse(providerCertificateMetadata.createTime()).isBefore(Instant.now().minus(7, ChronoUnit.DAYS))) { log.log(Level.INFO, String.format("Deleting unmaintained certificate with request_id %s and SANs %s", providerCertificateMetadata.requestId(), providerCertificateMetadata.dnsNames().stream().map(d -> d.dnsName).collect(Collectors.joining(", ")))); endpointCertificateProvider.deleteCertificate(ApplicationId.fromSerializedForm("applicationid:is:unknown"), providerCertificateMetadata.requestId()); } } } } } }
class EndpointCertificateMaintainer extends ControllerMaintainer { private static final Logger log = Logger.getLogger(EndpointCertificateMaintainer.class.getName()); private final DeploymentTrigger deploymentTrigger; private final Clock clock; private final CuratorDb curator; private final SecretStore secretStore; private final EndpointCertificateProvider endpointCertificateProvider; final Comparator<EligibleJob> oldestFirst = Comparator.comparing(e -> e.deployment.at()); @Inject public EndpointCertificateMaintainer(Controller controller, Duration interval) { super(controller, interval); this.deploymentTrigger = controller.applications().deploymentTrigger(); this.clock = controller.clock(); this.secretStore = controller.secretStore(); this.curator = controller().curator(); this.endpointCertificateProvider = controller.serviceRegistry().endpointCertificateProvider(); } @Override protected double maintain() { try { deployRefreshedCertificates(); updateRefreshedCertificates(); deleteUnusedCertificates(); deleteOrReportUnmanagedCertificates(); } catch (Exception e) { log.log(Level.SEVERE, "Exception caught while maintaining endpoint certificates", e); return 0.0; } return 1.0; } private void updateRefreshedCertificates() { curator.readAllEndpointCertificateMetadata().forEach(((applicationId, endpointCertificateMetadata) -> { var latestAvailableVersion = latestVersionInSecretStore(endpointCertificateMetadata); if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > endpointCertificateMetadata.version()) { var refreshedCertificateMetadata = endpointCertificateMetadata .withVersion(latestAvailableVersion.getAsInt()) .withLastRefreshed(clock.instant().getEpochSecond()); try (Mutex lock = lock(applicationId)) { if (Optional.of(endpointCertificateMetadata).equals(curator.readEndpointCertificateMetadata(applicationId))) { curator.writeEndpointCertificateMetadata(applicationId, refreshedCertificateMetadata); } } } })); } record EligibleJob(Deployment 
deployment, ApplicationId applicationId, JobType job) {} /** * If it's been four days since the cert has been refreshed, re-trigger prod deployment jobs (one at a time). */ private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) { try { var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName())); var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName())); return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max(); } catch (SecretNotFoundException s) { return OptionalInt.empty(); } } private void deleteUnusedCertificates() { var oneMonthAgo = clock.instant().minus(30, ChronoUnit.DAYS); curator.readAllEndpointCertificateMetadata().forEach((applicationId, storedMetaData) -> { var lastRequested = Instant.ofEpochSecond(storedMetaData.lastRequested()); if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(applicationId)) { try (Mutex lock = lock(applicationId)) { if (Optional.of(storedMetaData).equals(curator.readEndpointCertificateMetadata(applicationId))) { log.log(Level.INFO, "Cert for app " + applicationId.serializedForm() + " has not been requested in a month and app has no deployments, deleting from provider and ZK"); endpointCertificateProvider.deleteCertificate(applicationId, storedMetaData.rootRequestId()); curator.deleteEndpointCertificateMetadata(applicationId); } } } }); } private Mutex lock(ApplicationId applicationId) { return curator.lock(TenantAndApplicationId.from(applicationId)); } private boolean hasNoDeployments(ApplicationId applicationId) { return controller().applications().getInstance(applicationId) .map(Instance::deployments) .orElseGet(Map::of) .isEmpty(); } private void deleteOrReportUnmanagedCertificates() { List<EndpointCertificateRequestMetadata> endpointCertificateMetadata = endpointCertificateProvider.listCertificates(); Map<ApplicationId, 
EndpointCertificateMetadata> storedEndpointCertificateMetadata = curator.readAllEndpointCertificateMetadata(); List<String> leafRequestIds = storedEndpointCertificateMetadata.values().stream().flatMap(m -> m.leafRequestId().stream()).toList(); List<String> rootRequestIds = storedEndpointCertificateMetadata.values().stream().map(EndpointCertificateMetadata::rootRequestId).toList(); for (var providerCertificateMetadata : endpointCertificateMetadata) { if (!rootRequestIds.contains(providerCertificateMetadata.requestId()) && !leafRequestIds.contains(providerCertificateMetadata.requestId())) { EndpointCertificateDetails unknownCertDetails = endpointCertificateProvider.certificateDetails(providerCertificateMetadata.requestId()); boolean matchFound = false; for (Map.Entry<ApplicationId, EndpointCertificateMetadata> storedAppEntry : storedEndpointCertificateMetadata.entrySet()) { ApplicationId storedApp = storedAppEntry.getKey(); EndpointCertificateMetadata storedAppMetadata = storedAppEntry.getValue(); if (storedAppMetadata.certName().equals(unknownCertDetails.cert_key_keyname())) { matchFound = true; try (Mutex lock = lock(storedApp)) { if (Optional.of(storedAppMetadata).equals(curator.readEndpointCertificateMetadata(storedApp))) { log.log(Level.INFO, "Cert for app " + storedApp.serializedForm() + " has a new leafRequestId " + unknownCertDetails.request_id() + ", updating in ZK"); curator.writeEndpointCertificateMetadata(storedApp, storedAppMetadata.withLeafRequestId(Optional.of(unknownCertDetails.request_id()))); } break; } } } if (!matchFound) { if (Instant.parse(providerCertificateMetadata.createTime()).isBefore(Instant.now().minus(7, ChronoUnit.DAYS))) { log.log(Level.INFO, String.format("Deleting unmaintained certificate with request_id %s and SANs %s", providerCertificateMetadata.requestId(), providerCertificateMetadata.dnsNames().stream().map(d -> d.dnsName).collect(Collectors.joining(", ")))); 
endpointCertificateProvider.deleteCertificate(ApplicationId.fromSerializedForm("applicationid:is:unknown"), providerCertificateMetadata.requestId()); } } } } } }
`ConnectionException` is defined as error, feels like they should be the same (whatever it is)?
public void suspend(final String hostName) { UpdateHostResponse response; try { var params = new ConfigServerApi .Params<UpdateHostResponse>() .setConnectionTimeout(CONNECTION_TIMEOUT) .setRetryPolicy(createRetryPolicyForSuspend()); response = configServerApi.put(getSuspendPath(hostName), Optional.empty(), UpdateHostResponse.class, params); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (ConnectionException e) { throw ConvergenceException.ofTransient("Failed to suspend " + hostName + ": " + e.getMessage()); } catch (RuntimeException e) { throw new RuntimeException("Got error on suspend", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); }
throw ConvergenceException.ofTransient("Failed to suspend " + hostName + ": " + e.getMessage());
public void suspend(final String hostName) { UpdateHostResponse response; try { var params = new ConfigServerApi .Params<UpdateHostResponse>() .setConnectionTimeout(CONNECTION_TIMEOUT) .setRetryPolicy(createRetryPolicyForSuspend()); response = configServerApi.put(getSuspendPath(hostName), Optional.empty(), UpdateHostResponse.class, params); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (ConnectionException e) { throw ConvergenceException.ofTransient("Failed to suspend " + hostName + ": " + e.getMessage()); } catch (RuntimeException e) { throw new RuntimeException("Got error on suspend", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); }
class OrchestratorImpl implements Orchestrator { private static final Logger logger = Logger.getLogger(OrchestratorImpl.class.getName()); private static final Duration CONNECTION_TIMEOUT = Duration.ofSeconds(15); private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_API = ORCHESTRATOR_PATH_PREFIX + "/v1/hosts"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API = ORCHESTRATOR_PATH_PREFIX + "/v1/suspensions/hosts"; private final ConfigServerApi configServerApi; public OrchestratorImpl(ConfigServerApi configServerApi) { this.configServerApi = configServerApi; } @Override private static ConfigServerApi.RetryPolicy<UpdateHostResponse> createRetryPolicyForSuspend() { return new ConfigServerApi.RetryPolicy<UpdateHostResponse>() { @Override public boolean tryNextConfigServer(URI configServerEndpoint, UpdateHostResponse response) { HostStateChangeDenialReason reason = response.reason(); if (reason == null) { return false; } if ("unknown-service-status".equals(reason.constraintName())) { logger.warning("Config server at [" + configServerEndpoint + "] failed with transient error (will try next): " + reason.message()); return true; } return false; } }; } @Override public void suspend(String parentHostName, List<String> hostNames) { final BatchOperationResult batchOperationResult; try { var params = new ConfigServerApi.Params<BatchOperationResult>().setConnectionTimeout(CONNECTION_TIMEOUT); String hostnames = String.join("&hostname=", hostNames); String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, parentHostName, hostnames); batchOperationResult = configServerApi.put(url, Optional.empty(), BatchOperationResult.class, params); } catch (HttpException e) { throw new OrchestratorException("Failed to batch suspend for " + parentHostName + ": " + e.toString()); } catch (ConnectionException e) { throw ConvergenceException.ofTransient("Failed to batch suspend for 
" + parentHostName + ": " + e.getMessage()); } catch (RuntimeException e) { throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e); } batchOperationResult.getFailureReason().ifPresent(reason -> { throw new OrchestratorException(reason); }); } @Override public void resume(final String hostName) { UpdateHostResponse response; try { String path = getSuspendPath(hostName); response = configServerApi.delete(path, UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to resume " + hostName + ": " + e.toString()); } catch (ConnectionException e) { throw ConvergenceException.ofTransient("Failed to resume " + hostName + ": " + e.getMessage()); } catch (RuntimeException e) { throw new RuntimeException("Got error on resume", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } private String getSuspendPath(String hostName) { return ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended"; } }
class OrchestratorImpl implements Orchestrator { private static final Logger logger = Logger.getLogger(OrchestratorImpl.class.getName()); private static final Duration CONNECTION_TIMEOUT = Duration.ofSeconds(15); private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_API = ORCHESTRATOR_PATH_PREFIX + "/v1/hosts"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API = ORCHESTRATOR_PATH_PREFIX + "/v1/suspensions/hosts"; private final ConfigServerApi configServerApi; public OrchestratorImpl(ConfigServerApi configServerApi) { this.configServerApi = configServerApi; } @Override private static ConfigServerApi.RetryPolicy<UpdateHostResponse> createRetryPolicyForSuspend() { return new ConfigServerApi.RetryPolicy<UpdateHostResponse>() { @Override public boolean tryNextConfigServer(URI configServerEndpoint, UpdateHostResponse response) { HostStateChangeDenialReason reason = response.reason(); if (reason == null) { return false; } if ("unknown-service-status".equals(reason.constraintName())) { logger.warning("Config server at [" + configServerEndpoint + "] failed with transient error (will try next): " + reason.message()); return true; } return false; } }; } @Override public void suspend(String parentHostName, List<String> hostNames) { final BatchOperationResult batchOperationResult; try { var params = new ConfigServerApi.Params<BatchOperationResult>().setConnectionTimeout(CONNECTION_TIMEOUT); String hostnames = String.join("&hostname=", hostNames); String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, parentHostName, hostnames); batchOperationResult = configServerApi.put(url, Optional.empty(), BatchOperationResult.class, params); } catch (HttpException e) { throw new OrchestratorException("Failed to batch suspend for " + parentHostName + ": " + e.toString()); } catch (ConnectionException e) { throw ConvergenceException.ofTransient("Failed to batch suspend for 
" + parentHostName + ": " + e.getMessage()); } catch (RuntimeException e) { throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e); } batchOperationResult.getFailureReason().ifPresent(reason -> { throw new OrchestratorException(reason); }); } @Override public void resume(final String hostName) { UpdateHostResponse response; try { String path = getSuspendPath(hostName); response = configServerApi.delete(path, UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to resume " + hostName + ": " + e.toString()); } catch (ConnectionException e) { throw ConvergenceException.ofTransient("Failed to resume " + hostName + ": " + e.getMessage()); } catch (RuntimeException e) { throw new RuntimeException("Got error on resume", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } private String getSuspendPath(String hostName) { return ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended"; } }
Who are "they"? Suspension failures are expected to happen and are expected to be transient. During upgrades there may well be connection failures etc. without that necessarily indicating anything wrong. The strategy is to avoid incrementing unhandled_exceptions in the common cases of Vespa upgrades etc., so that is why a ConnectionException is converted to a transient convergence exception here. But connection exceptions in other circumstances should generally increment unhandled_exceptions.
/**
 * Suspends the given host through the orchestrator's host API.
 * Transport- and HTTP-level failures are translated into orchestrator/convergence
 * exceptions, and a denial reported by the orchestrator is rethrown as an
 * {@link OrchestratorException}.
 */
public void suspend(final String hostName) {
    final UpdateHostResponse hostResponse;
    try {
        var requestParams = new ConfigServerApi
                .Params<UpdateHostResponse>()
                .setConnectionTimeout(CONNECTION_TIMEOUT)
                .setRetryPolicy(createRetryPolicyForSuspend());
        hostResponse = configServerApi.put(getSuspendPath(hostName), Optional.empty(), UpdateHostResponse.class, requestParams);
    } catch (HttpException.NotFoundException n) {
        throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found");
    } catch (HttpException e) {
        throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString());
    } catch (ConnectionException e) {
        // Connection problems during suspend are expected to be transient (e.g. config server
        // upgrades), so they are reported as transient rather than as unhandled errors.
        throw ConvergenceException.ofTransient("Failed to suspend " + hostName + ": " + e.getMessage());
    } catch (RuntimeException e) {
        throw new RuntimeException("Got error on suspend", e);
    }

    var denialReason = hostResponse.reason();
    if (denialReason != null)
        throw new OrchestratorException(denialReason.message());
}
// Connection failures here are converted to a *transient* convergence exception rather than an
// error: suspend-time connection problems are expected (e.g. during Vespa/config server upgrades)
// and should not increment unhandled_exceptions.
throw ConvergenceException.ofTransient("Failed to suspend " + hostName + ": " + e.getMessage());
/**
 * Suspends the given host through the orchestrator's host API.
 * Transport- and HTTP-level failures are translated into orchestrator/convergence
 * exceptions, and a denial reported by the orchestrator is rethrown as an
 * {@link OrchestratorException}.
 */
public void suspend(final String hostName) {
    final UpdateHostResponse hostResponse;
    try {
        var requestParams = new ConfigServerApi
                .Params<UpdateHostResponse>()
                .setConnectionTimeout(CONNECTION_TIMEOUT)
                .setRetryPolicy(createRetryPolicyForSuspend());
        hostResponse = configServerApi.put(getSuspendPath(hostName), Optional.empty(), UpdateHostResponse.class, requestParams);
    } catch (HttpException.NotFoundException n) {
        throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found");
    } catch (HttpException e) {
        throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString());
    } catch (ConnectionException e) {
        // Connection problems during suspend are expected to be transient (e.g. config server
        // upgrades), so they are reported as transient rather than as unhandled errors.
        throw ConvergenceException.ofTransient("Failed to suspend " + hostName + ": " + e.getMessage());
    } catch (RuntimeException e) {
        throw new RuntimeException("Got error on suspend", e);
    }

    var denialReason = hostResponse.reason();
    if (denialReason != null)
        throw new OrchestratorException(denialReason.message());
}
class OrchestratorImpl implements Orchestrator { private static final Logger logger = Logger.getLogger(OrchestratorImpl.class.getName()); private static final Duration CONNECTION_TIMEOUT = Duration.ofSeconds(15); private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_API = ORCHESTRATOR_PATH_PREFIX + "/v1/hosts"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API = ORCHESTRATOR_PATH_PREFIX + "/v1/suspensions/hosts"; private final ConfigServerApi configServerApi; public OrchestratorImpl(ConfigServerApi configServerApi) { this.configServerApi = configServerApi; } @Override private static ConfigServerApi.RetryPolicy<UpdateHostResponse> createRetryPolicyForSuspend() { return new ConfigServerApi.RetryPolicy<UpdateHostResponse>() { @Override public boolean tryNextConfigServer(URI configServerEndpoint, UpdateHostResponse response) { HostStateChangeDenialReason reason = response.reason(); if (reason == null) { return false; } if ("unknown-service-status".equals(reason.constraintName())) { logger.warning("Config server at [" + configServerEndpoint + "] failed with transient error (will try next): " + reason.message()); return true; } return false; } }; } @Override public void suspend(String parentHostName, List<String> hostNames) { final BatchOperationResult batchOperationResult; try { var params = new ConfigServerApi.Params<BatchOperationResult>().setConnectionTimeout(CONNECTION_TIMEOUT); String hostnames = String.join("&hostname=", hostNames); String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, parentHostName, hostnames); batchOperationResult = configServerApi.put(url, Optional.empty(), BatchOperationResult.class, params); } catch (HttpException e) { throw new OrchestratorException("Failed to batch suspend for " + parentHostName + ": " + e.toString()); } catch (ConnectionException e) { throw ConvergenceException.ofTransient("Failed to batch suspend for 
" + parentHostName + ": " + e.getMessage()); } catch (RuntimeException e) { throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e); } batchOperationResult.getFailureReason().ifPresent(reason -> { throw new OrchestratorException(reason); }); } @Override public void resume(final String hostName) { UpdateHostResponse response; try { String path = getSuspendPath(hostName); response = configServerApi.delete(path, UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to resume " + hostName + ": " + e.toString()); } catch (ConnectionException e) { throw ConvergenceException.ofTransient("Failed to resume " + hostName + ": " + e.getMessage()); } catch (RuntimeException e) { throw new RuntimeException("Got error on resume", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } private String getSuspendPath(String hostName) { return ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended"; } }
class OrchestratorImpl implements Orchestrator { private static final Logger logger = Logger.getLogger(OrchestratorImpl.class.getName()); private static final Duration CONNECTION_TIMEOUT = Duration.ofSeconds(15); private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_API = ORCHESTRATOR_PATH_PREFIX + "/v1/hosts"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API = ORCHESTRATOR_PATH_PREFIX + "/v1/suspensions/hosts"; private final ConfigServerApi configServerApi; public OrchestratorImpl(ConfigServerApi configServerApi) { this.configServerApi = configServerApi; } @Override private static ConfigServerApi.RetryPolicy<UpdateHostResponse> createRetryPolicyForSuspend() { return new ConfigServerApi.RetryPolicy<UpdateHostResponse>() { @Override public boolean tryNextConfigServer(URI configServerEndpoint, UpdateHostResponse response) { HostStateChangeDenialReason reason = response.reason(); if (reason == null) { return false; } if ("unknown-service-status".equals(reason.constraintName())) { logger.warning("Config server at [" + configServerEndpoint + "] failed with transient error (will try next): " + reason.message()); return true; } return false; } }; } @Override public void suspend(String parentHostName, List<String> hostNames) { final BatchOperationResult batchOperationResult; try { var params = new ConfigServerApi.Params<BatchOperationResult>().setConnectionTimeout(CONNECTION_TIMEOUT); String hostnames = String.join("&hostname=", hostNames); String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, parentHostName, hostnames); batchOperationResult = configServerApi.put(url, Optional.empty(), BatchOperationResult.class, params); } catch (HttpException e) { throw new OrchestratorException("Failed to batch suspend for " + parentHostName + ": " + e.toString()); } catch (ConnectionException e) { throw ConvergenceException.ofTransient("Failed to batch suspend for 
" + parentHostName + ": " + e.getMessage()); } catch (RuntimeException e) { throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e); } batchOperationResult.getFailureReason().ifPresent(reason -> { throw new OrchestratorException(reason); }); } @Override public void resume(final String hostName) { UpdateHostResponse response; try { String path = getSuspendPath(hostName); response = configServerApi.delete(path, UpdateHostResponse.class); } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found"); } catch (HttpException e) { throw new OrchestratorException("Failed to resume " + hostName + ": " + e.toString()); } catch (ConnectionException e) { throw ConvergenceException.ofTransient("Failed to resume " + hostName + ": " + e.getMessage()); } catch (RuntimeException e) { throw new RuntimeException("Got error on resume", e); } Optional.ofNullable(response.reason()).ifPresent(reason -> { throw new OrchestratorException(reason.message()); }); } private String getSuspendPath(String hostName) { return ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended"; } }
```suggestion String reason = "Disabled in: %s. Current region: %s.".formatted(disablingRegions.isEmpty() ? "no regions" : "regions " + String.join(", ", disablingRegions), thisRegion); ```
/**
 * Evaluates {@link DisabledInRegions}: the test is disabled when the current region
 * is among the regions listed by the annotation; otherwise it is enabled.
 * Fix: the reason message said "instances" (with a stray leading space) for what are
 * regions — corrected to "no regions"/"regions ".
 */
public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
    Optional<DisabledInRegions> annotation = AnnotationUtils.findAnnotation(context.getElement(), DisabledInRegions.class);
    if (annotation.isEmpty())
        return ConditionEvaluationResult.enabled(DisabledInRegions.class.getSimpleName() + " is not present");

    List<String> disablingRegions = List.of(annotation.get().value());
    // NOTE(review): this reads application().instance(), not a region accessor — confirm it really yields the region.
    String thisRegion = TestRuntime.get().application().instance();
    String reason = "Disabled in: %s. Current region: %s.".formatted(disablingRegions.isEmpty() ? "no regions" : "regions " + String.join(", ", disablingRegions), thisRegion);
    return disablingRegions.contains(thisRegion) ? ConditionEvaluationResult.disabled(reason)
                                                 : ConditionEvaluationResult.enabled(reason);
}
// Fixed wording: these are regions, not instances, and the stray leading space is removed.
String reason = "Disabled in: %s. Current region: %s.".formatted(disablingRegions.isEmpty() ? "no regions" : "regions " + String.join(", ", disablingRegions), thisRegion);
/** Disables the test when the current region is among the regions listed in {@link DisabledInRegions}. */
public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
    Optional<DisabledInRegions> maybeAnnotation = AnnotationUtils.findAnnotation(context.getElement(), DisabledInRegions.class);
    if (maybeAnnotation.isEmpty())
        return ConditionEvaluationResult.enabled(DisabledInRegions.class.getSimpleName() + " is not present");

    List<String> listed = List.of(maybeAnnotation.get().value());
    String current = TestRuntime.get().application().instance();
    String detail = listed.isEmpty() ? "no regions" : "regions " + String.join(", ", listed);
    String reason = "Disabled in: %s. Current region: %s.".formatted(detail, current);
    if (listed.contains(current))
        return ConditionEvaluationResult.disabled(reason);
    return ConditionEvaluationResult.enabled(reason);
}
// NOTE(review): dangling @Override with no method following — the annotated
// evaluateExecutionCondition method appears to have been extracted from this snippet;
// as written this does not compile. TODO: restore the method.
class DisabledInRegionsCondition implements ExecutionCondition { @Override }
// NOTE(review): dangling @Override with no method following — the annotated
// evaluateExecutionCondition method appears to have been extracted from this snippet;
// as written this does not compile. TODO: restore the method.
class DisabledInRegionsCondition implements ExecutionCondition { @Override }
```suggestion String reason = "Disabled in: %s. Current instance: %s.".formatted(disablingInstances.isEmpty() ? "no instances" : "instances " + String.join(", ", disablingInstances), thisInstance); ```
/**
 * Evaluates {@link DisabledInInstances}: the test is disabled when the current instance
 * is among the instances listed by the annotation; otherwise it is enabled.
 * Fix: removed the stray leading space in " instances " in the reason message.
 */
public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
    Optional<DisabledInInstances> annotation = AnnotationUtils.findAnnotation(context.getElement(), DisabledInInstances.class);
    if (annotation.isEmpty())
        return ConditionEvaluationResult.enabled(DisabledInInstances.class.getSimpleName() + " is not present");

    List<String> disablingInstances = List.of(annotation.get().value());
    String thisInstance = TestRuntime.get().application().instance();
    String reason = "Disabled in: %s. Current instance: %s.".formatted(disablingInstances.isEmpty() ? "no instances" : "instances " + String.join(", ", disablingInstances), thisInstance);
    return disablingInstances.contains(thisInstance) ? ConditionEvaluationResult.disabled(reason)
                                                     : ConditionEvaluationResult.enabled(reason);
}
// Fixed: removed the stray leading space before "instances".
String reason = "Disabled in: %s. Current instance: %s.".formatted(disablingInstances.isEmpty() ? "no instances" : "instances " + String.join(", ", disablingInstances), thisInstance);
/** Disables the test when the current instance is among the instances listed in {@link DisabledInInstances}. */
public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
    Optional<DisabledInInstances> maybeAnnotation = AnnotationUtils.findAnnotation(context.getElement(), DisabledInInstances.class);
    if (maybeAnnotation.isEmpty())
        return ConditionEvaluationResult.enabled(DisabledInInstances.class.getSimpleName() + " is not present");

    List<String> listed = List.of(maybeAnnotation.get().value());
    String current = TestRuntime.get().application().instance();
    String detail = listed.isEmpty() ? "no instances" : "instances " + String.join(", ", listed);
    String reason = "Disabled in: %s. Current instance: %s.".formatted(detail, current);
    if (listed.contains(current))
        return ConditionEvaluationResult.disabled(reason);
    return ConditionEvaluationResult.enabled(reason);
}
// NOTE(review): dangling @Override with no method following — the annotated
// evaluateExecutionCondition method appears to have been extracted from this snippet;
// as written this does not compile. TODO: restore the method.
class DisabledInInstancesCondition implements ExecutionCondition { @Override }
// NOTE(review): dangling @Override with no method following — the annotated
// evaluateExecutionCondition method appears to have been extracted from this snippet;
// as written this does not compile. TODO: restore the method.
class DisabledInInstancesCondition implements ExecutionCondition { @Override }
```suggestion String reason = "Enabled in: %s. Current region: %s.".formatted(enablingRegions.isEmpty() ? "no regions" : "regions " + String.join(", ", enablingRegions), thisRegion); ```
/**
 * Evaluates {@link EnabledInRegions}: the test is enabled only when the current region
 * is among the regions listed by the annotation.
 * Fix: the reason message said "instances" (with a stray leading space) for what are
 * regions — corrected to "no regions"/"regions ".
 */
public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
    Optional<EnabledInRegions> annotation = AnnotationUtils.findAnnotation(context.getElement(), EnabledInRegions.class);
    if (annotation.isEmpty())
        return ConditionEvaluationResult.enabled(EnabledInRegions.class.getSimpleName() + " is not present");

    List<String> enablingRegions = List.of(annotation.get().value());
    // NOTE(review): this reads application().instance(), not a region accessor — confirm it really yields the region.
    String thisRegion = TestRuntime.get().application().instance();
    String reason = "Enabled in: %s. Current region: %s.".formatted(enablingRegions.isEmpty() ? "no regions" : "regions " + String.join(", ", enablingRegions), thisRegion);
    return enablingRegions.contains(thisRegion) ? ConditionEvaluationResult.enabled(reason)
                                                : ConditionEvaluationResult.disabled(reason);
}
// Fixed wording: these are regions, not instances, and the stray leading space is removed.
String reason = "Enabled in: %s. Current region: %s.".formatted(enablingRegions.isEmpty() ? "no regions" : "regions " + String.join(", ", enablingRegions), thisRegion);
/** Enables the test only when the current region is among the regions listed in {@link EnabledInRegions}. */
public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
    Optional<EnabledInRegions> maybeAnnotation = AnnotationUtils.findAnnotation(context.getElement(), EnabledInRegions.class);
    if (maybeAnnotation.isEmpty())
        return ConditionEvaluationResult.enabled(EnabledInRegions.class.getSimpleName() + " is not present");

    List<String> listed = List.of(maybeAnnotation.get().value());
    String current = TestRuntime.get().application().instance();
    String detail = listed.isEmpty() ? "no regions" : "regions " + String.join(", ", listed);
    String reason = "Enabled in: %s. Current region: %s.".formatted(detail, current);
    if (listed.contains(current))
        return ConditionEvaluationResult.enabled(reason);
    return ConditionEvaluationResult.disabled(reason);
}
// NOTE(review): dangling @Override with no method following — the annotated
// evaluateExecutionCondition method appears to have been extracted from this snippet;
// as written this does not compile. TODO: restore the method.
class EnabledInRegionsCondition implements ExecutionCondition { @Override }
// NOTE(review): dangling @Override with no method following — the annotated
// evaluateExecutionCondition method appears to have been extracted from this snippet;
// as written this does not compile. TODO: restore the method.
class EnabledInRegionsCondition implements ExecutionCondition { @Override }
```suggestion String reason = "Enabled in: %s. Current instance: %s.".formatted(enablingInstances.isEmpty() ? "no instances" : "instances " + String.join(", ", enablingInstances), thisInstance); ```
/**
 * Evaluates {@link EnabledInInstances}: the test is enabled only when the current instance
 * is among the instances listed by the annotation.
 * Fix: removed the stray leading space in " instances " in the reason message.
 */
public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
    Optional<EnabledInInstances> annotation = AnnotationUtils.findAnnotation(context.getElement(), EnabledInInstances.class);
    if (annotation.isEmpty())
        return ConditionEvaluationResult.enabled(EnabledInInstances.class.getSimpleName() + " is not present");

    List<String> enablingInstances = List.of(annotation.get().value());
    String thisInstance = TestRuntime.get().application().instance();
    String reason = "Enabled in: %s. Current instance: %s.".formatted(enablingInstances.isEmpty() ? "no instances" : "instances " + String.join(", ", enablingInstances), thisInstance);
    return enablingInstances.contains(thisInstance) ? ConditionEvaluationResult.enabled(reason)
                                                    : ConditionEvaluationResult.disabled(reason);
}
// Fixed: removed the stray leading space before "instances".
String reason = "Enabled in: %s. Current instance: %s.".formatted(enablingInstances.isEmpty() ? "no instances" : "instances " + String.join(", ", enablingInstances), thisInstance);
/** Enables the test only when the current instance is among the instances listed in {@link EnabledInInstances}. */
public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
    Optional<EnabledInInstances> maybeAnnotation = AnnotationUtils.findAnnotation(context.getElement(), EnabledInInstances.class);
    if (maybeAnnotation.isEmpty())
        return ConditionEvaluationResult.enabled(EnabledInInstances.class.getSimpleName() + " is not present");

    List<String> listed = List.of(maybeAnnotation.get().value());
    String current = TestRuntime.get().application().instance();
    String detail = listed.isEmpty() ? "no instances" : "instances " + String.join(", ", listed);
    String reason = "Enabled in: %s. Current instance: %s.".formatted(detail, current);
    if (listed.contains(current))
        return ConditionEvaluationResult.enabled(reason);
    return ConditionEvaluationResult.disabled(reason);
}
// NOTE(review): dangling @Override with no method following — the annotated
// evaluateExecutionCondition method appears to have been extracted from this snippet;
// as written this does not compile. TODO: restore the method.
class EnabledInInstancesCondition implements ExecutionCondition { @Override }
// NOTE(review): dangling @Override with no method following — the annotated
// evaluateExecutionCondition method appears to have been extracted from this snippet;
// as written this does not compile. TODO: restore the method.
class EnabledInInstancesCondition implements ExecutionCondition { @Override }
```suggestion if (speedTest) query.add("dryRun=true"); ```
/**
 * Builds the (possibly empty) query string for a /document/v1 operation from the given parameters.
 * Fix: a speed test is requested with the "dryRun=true" query parameter, not "speedTest=true".
 */
static String getQuery(OperationParameters params, boolean speedTest) {
    StringJoiner query = new StringJoiner("&", "?", "").setEmptyValue("");
    if (params.createIfNonExistent()) query.add("create=true");
    params.testAndSetCondition().ifPresent(condition -> query.add("condition=" + encode(condition)));
    params.timeout().ifPresent(timeout -> query.add("timeout=" + timeout.toMillis() + "ms"));
    params.route().ifPresent(route -> query.add("route=" + encode(route)));
    params.tracelevel().ifPresent(tracelevel -> query.add("tracelevel=" + tracelevel));
    if (speedTest) query.add("dryRun=true");
    return query.toString();
}
// Fixed parameter name: /document/v1 expects "dryRun=true" for a speed test, not "speedTest=true".
if (speedTest) query.add("dryRun=true");
/** Assembles the optional query string for a /document/v1 operation; empty when no parameter applies. */
static String getQuery(OperationParameters params, boolean speedTest) {
    StringJoiner joiner = new StringJoiner("&", "?", "").setEmptyValue("");
    if (params.createIfNonExistent())
        joiner.add("create=true");
    params.testAndSetCondition().ifPresent(tas -> joiner.add("condition=" + encode(tas)));
    params.timeout().ifPresent(duration -> joiner.add("timeout=" + duration.toMillis() + "ms"));
    params.route().ifPresent(r -> joiner.add("route=" + encode(r)));
    params.tracelevel().ifPresent(level -> joiner.add("tracelevel=" + level));
    if (speedTest)
        joiner.add("dryRun=true");
    return joiner.toString();
}
/**
 * HTTP implementation of FeedClient: turns put/update/remove operations into
 * /document/v1 requests and parses the JSON responses into Result objects.
 *
 * NOTE(review): send() calls getQuery(params, speedTest), which is not present in this
 * snippet — it appears to have been extracted; this snippet does not compile on its own.
 */
class HttpFeedClient implements FeedClient {

    private static final JsonFactory factory = new JsonFactory();

    private final Map<String, Supplier<String>> requestHeaders;
    private final RequestStrategy requestStrategy;
    private final AtomicBoolean closed = new AtomicBoolean();  // set once on close(); rejects later sends
    private final boolean speedTest;

    HttpFeedClient(FeedClientBuilderImpl builder) throws IOException {
        this(builder, new HttpRequestStrategy(builder));
    }

    HttpFeedClient(FeedClientBuilderImpl builder, RequestStrategy requestStrategy) {
        this.requestHeaders = new HashMap<>(builder.requestHeaders);
        this.requestStrategy = requestStrategy;
        this.speedTest = builder.speedTest;
    }

    /** Feeds a put (POST) of the given document JSON. */
    @Override
    public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
        return send("POST", documentId, requireNonNull(documentJson), params);
    }

    /** Feeds an update (PUT) of the given update JSON. */
    @Override
    public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
        return send("PUT", documentId, requireNonNull(updateJson), params);
    }

    /** Feeds a removal (DELETE); no request body. */
    @Override
    public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
        return send("DELETE", documentId, null, params);
    }

    @Override
    public OperationStats stats() { return requestStrategy.stats(); }

    @Override
    public CircuitBreaker.State circuitBreakerState() { return requestStrategy.circuitBreakerState(); }

    /** Closes the client; when graceful, waits for in-flight operations before destroying the strategy. */
    @Override
    public void close(boolean graceful) {
        closed.set(true);
        if (graceful)
            requestStrategy.await();
        requestStrategy.destroy();
    }

    /** Builds and enqueues one HTTP operation; unwraps CompletionException layers before completing the promise. */
    private CompletableFuture<Result> send(String method, DocumentId documentId, String operationJson, OperationParameters params) {
        if (closed.get())
            throw new IllegalStateException("Client is closed");

        HttpRequest request = new HttpRequest(method,
                                              getPath(documentId) + getQuery(params, speedTest),
                                              requestHeaders,
                                              operationJson == null ? null : operationJson.getBytes(UTF_8),
                                              params.timeout().orElse(null));
        CompletableFuture<Result> promise = new CompletableFuture<>();
        requestStrategy.enqueue(documentId, request)
                       .thenApply(response -> toResult(request, response, documentId))
                       .whenComplete((result, thrown) -> {
                           if (thrown != null) {
                               // Peel nested CompletionException wrappers so callers see the root cause.
                               while (thrown instanceof CompletionException) thrown = thrown.getCause();
                               promise.completeExceptionally(thrown);
                           }
                           else promise.complete(result);
                       });
        return promise;
    }

    private enum Outcome { success, conditionNotMet, vespaFailure, transportFailure };

    /** Maps an outcome to a Result.Type; only success and conditionNotMet have a corresponding type. */
    static Result.Type toResultType(Outcome outcome) {
        switch (outcome) {
            case success: return Result.Type.success;
            case conditionNotMet: return Result.Type.conditionNotMet;
            default: throw new IllegalArgumentException("No corresponding result type for '" + outcome + "'");
        }
    }

    /** Classifies the HTTP response by status code and parses "message" and "trace" from its JSON body. */
    static Result toResult(HttpRequest request, HttpResponse response, DocumentId documentId) {
        Outcome outcome;
        switch (response.code()) {
            case 200: outcome = Outcome.success; break;
            case 412: outcome = Outcome.conditionNotMet; break;
            case 502:
            case 504:
            case 507: outcome = Outcome.vespaFailure; break;
            default: outcome = Outcome.transportFailure;
        }

        String message = null;
        String trace = null;
        try {
            JsonParser parser = factory.createParser(response.body());
            if (parser.nextToken() != JsonToken.START_OBJECT)
                throw new ResultParseException(documentId,
                                               "Expected '" + JsonToken.START_OBJECT + "', but found '" + parser.currentToken() + "' in: "
                                               + new String(response.body(), UTF_8));
            String name;
            while ((name = parser.nextFieldName()) != null) {
                switch (name) {
                    case "message": message = parser.nextTextValue(); break;
                    case "trace": {
                        if (parser.nextToken() != JsonToken.START_ARRAY)
                            throw new ResultParseException(documentId,
                                                           "Expected 'trace' to be an array, but got '" + parser.currentToken() + "' in: "
                                                           + new String(response.body(), UTF_8));
                        // Capture the raw bytes of the trace array by tracking array nesting depth.
                        int start = (int) parser.getTokenLocation().getByteOffset();
                        int depth = 1;
                        while (depth > 0) switch (parser.nextToken()) {
                            case START_ARRAY: ++depth; break;
                            case END_ARRAY: --depth; break;
                        }
                        int end = (int) parser.getTokenLocation().getByteOffset() + 1;
                        trace = new String(response.body(), start, end - start, UTF_8);
                    }; break;
                    default: parser.nextToken();
                }
            }
            if (parser.currentToken() != JsonToken.END_OBJECT)
                throw new ResultParseException(documentId,
                                               "Expected '" + JsonToken.END_OBJECT + "', but found '" + parser.currentToken() + "' in: "
                                               + new String(response.body(), UTF_8));
        }
        catch (IOException e) {
            throw new ResultParseException(documentId, e);
        }

        if (outcome == Outcome.transportFailure)
            throw new FeedException(documentId,
                                    "Status " + response.code() + " executing '" + request + "': "
                                    + (message == null ? new String(response.body(), UTF_8) : message));
        if (outcome == Outcome.vespaFailure)
            throw new ResultException(documentId, message, trace);
        return new ResultImpl(toResultType(outcome), documentId, message, trace);
    }

    /** Builds the /document/v1 path for the given document id (number, group, or docid form). */
    static String getPath(DocumentId documentId) {
        StringJoiner path = new StringJoiner("/", "/", "");
        path.add("document");
        path.add("v1");
        path.add(encode(documentId.namespace()));
        path.add(encode(documentId.documentType()));
        if (documentId.number().isPresent()) {
            path.add("number");
            path.add(Long.toUnsignedString(documentId.number().getAsLong()));
        }
        else if (documentId.group().isPresent()) {
            path.add("group");
            path.add(encode(documentId.group().get()));
        }
        else {
            path.add("docid");
        }
        path.add(encode(documentId.userSpecific()));
        return path.toString();
    }

    /** URL-encodes the given string as UTF-8; UnsupportedEncodingException cannot happen for UTF-8. */
    static String encode(String raw) {
        try {
            return URLEncoder.encode(raw, UTF_8.name());
        }
        catch (UnsupportedEncodingException e) {
            throw new IllegalStateException(e);
        }
    }

}
class HttpFeedClient implements FeedClient { private static final JsonFactory factory = new JsonFactory(); private final Map<String, Supplier<String>> requestHeaders; private final RequestStrategy requestStrategy; private final AtomicBoolean closed = new AtomicBoolean(); private final boolean speedTest; HttpFeedClient(FeedClientBuilderImpl builder) throws IOException { this(builder, new HttpRequestStrategy(builder)); } HttpFeedClient(FeedClientBuilderImpl builder, RequestStrategy requestStrategy) { this.requestHeaders = new HashMap<>(builder.requestHeaders); this.requestStrategy = requestStrategy; this.speedTest = builder.speedTest; } @Override public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) { return send("POST", documentId, requireNonNull(documentJson), params); } @Override public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) { return send("PUT", documentId, requireNonNull(updateJson), params); } @Override public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) { return send("DELETE", documentId, null, params); } @Override public OperationStats stats() { return requestStrategy.stats(); } @Override public CircuitBreaker.State circuitBreakerState() { return requestStrategy.circuitBreakerState(); } @Override public void close(boolean graceful) { closed.set(true); if (graceful) requestStrategy.await(); requestStrategy.destroy(); } private CompletableFuture<Result> send(String method, DocumentId documentId, String operationJson, OperationParameters params) { if (closed.get()) throw new IllegalStateException("Client is closed"); HttpRequest request = new HttpRequest(method, getPath(documentId) + getQuery(params, speedTest), requestHeaders, operationJson == null ? 
null : operationJson.getBytes(UTF_8), params.timeout().orElse(null)); CompletableFuture<Result> promise = new CompletableFuture<>(); requestStrategy.enqueue(documentId, request) .thenApply(response -> toResult(request, response, documentId)) .whenComplete((result, thrown) -> { if (thrown != null) { while (thrown instanceof CompletionException) thrown = thrown.getCause(); promise.completeExceptionally(thrown); } else promise.complete(result); }); return promise; } private enum Outcome { success, conditionNotMet, vespaFailure, transportFailure }; static Result.Type toResultType(Outcome outcome) { switch (outcome) { case success: return Result.Type.success; case conditionNotMet: return Result.Type.conditionNotMet; default: throw new IllegalArgumentException("No corresponding result type for '" + outcome + "'"); } } static Result toResult(HttpRequest request, HttpResponse response, DocumentId documentId) { Outcome outcome; switch (response.code()) { case 200: outcome = Outcome.success; break; case 412: outcome = Outcome.conditionNotMet; break; case 502: case 504: case 507: outcome = Outcome.vespaFailure; break; default: outcome = Outcome.transportFailure; } String message = null; String trace = null; try { JsonParser parser = factory.createParser(response.body()); if (parser.nextToken() != JsonToken.START_OBJECT) throw new ResultParseException( documentId, "Expected '" + JsonToken.START_OBJECT + "', but found '" + parser.currentToken() + "' in: " + new String(response.body(), UTF_8)); String name; while ((name = parser.nextFieldName()) != null) { switch (name) { case "message": message = parser.nextTextValue(); break; case "trace": { if (parser.nextToken() != JsonToken.START_ARRAY) throw new ResultParseException(documentId, "Expected 'trace' to be an array, but got '" + parser.currentToken() + "' in: " + new String(response.body(), UTF_8)); int start = (int) parser.getTokenLocation().getByteOffset(); int depth = 1; while (depth > 0) switch (parser.nextToken()) { case 
START_ARRAY: ++depth; break; case END_ARRAY: --depth; break; } int end = (int) parser.getTokenLocation().getByteOffset() + 1; trace = new String(response.body(), start, end - start, UTF_8); }; break; default: parser.nextToken(); } } if (parser.currentToken() != JsonToken.END_OBJECT) throw new ResultParseException( documentId, "Expected '" + JsonToken.END_OBJECT + "', but found '" + parser.currentToken() + "' in: " + new String(response.body(), UTF_8)); } catch (IOException e) { throw new ResultParseException(documentId, e); } if (outcome == Outcome.transportFailure) throw new FeedException( documentId, "Status " + response.code() + " executing '" + request + "': " + (message == null ? new String(response.body(), UTF_8) : message)); if (outcome == Outcome.vespaFailure) throw new ResultException(documentId, message, trace); return new ResultImpl(toResultType(outcome), documentId, message, trace); } static String getPath(DocumentId documentId) { StringJoiner path = new StringJoiner("/", "/", ""); path.add("document"); path.add("v1"); path.add(encode(documentId.namespace())); path.add(encode(documentId.documentType())); if (documentId.number().isPresent()) { path.add("number"); path.add(Long.toUnsignedString(documentId.number().getAsLong())); } else if (documentId.group().isPresent()) { path.add("group"); path.add(encode(documentId.group().get())); } else { path.add("docid"); } path.add(encode(documentId.userSpecific())); return path.toString(); } static String encode(String raw) { try { return URLEncoder.encode(raw, UTF_8.name()); } catch (UnsupportedEncodingException e) { throw new IllegalStateException(e); } } }
Prefer Instant.now().isAfter(endTime) as above for consistency and readability.
/**
 * Blocks until at least {@code nodeCount} nodes have acknowledged cluster state
 * {@code version} or any higher version.
 *
 * @param version   minimum cluster state version nodes must have acked
 * @param nodeCount number of nodes required to have acked
 * @param timeout   maximum time to wait
 * @throws IllegalStateException if the condition is not met within {@code timeout}
 * @throws InterruptedException  if the waiting thread is interrupted
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, Duration timeout) throws InterruptedException {
    Instant endTime = Instant.now().plus(timeout);
    synchronized (monitor) {
        while (true) {
            int ackedNodes = 0;
            for (NodeInfo node : cluster.getNodeInfos()) {
                if (node.getClusterStateVersionBundleAcknowledged() >= version) {
                    ++ackedNodes;
                }
            }
            if (ackedNodes >= nodeCount) {
                context.log(logger, Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            // Success is checked before the deadline, so a satisfied condition always wins
            // even if the deadline passed while counting acks.
            if (Instant.now().isAfter(endTime)) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout);
            }
            monitor.wait(10);
        }
    }
}
if (remainingTime.isNegative()) {
/**
 * Blocks until at least {@code nodeCount} nodes have acknowledged cluster state
 * {@code version} or any higher version, failing once the timeout has expired.
 *
 * @param version   minimum cluster state version nodes must have acked
 * @param nodeCount number of nodes required to have acked
 * @param timeout   maximum time to wait before giving up
 * @throws IllegalStateException if the condition is not met within {@code timeout}
 * @throws InterruptedException  if the waiting thread is interrupted
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, Duration timeout) throws InterruptedException {
    Instant deadline = Instant.now().plus(timeout);
    synchronized (monitor) {
        while (true) {
            long ackedNodes = cluster.getNodeInfos().stream()
                                     .filter(node -> node.getClusterStateVersionBundleAcknowledged() >= version)
                                     .count();
            if (ackedNodes >= nodeCount) {
                context.log(logger, Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            if (Instant.now().isAfter(deadline)) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout);
            }
            monitor.wait(10); // Poll until acks arrive or the deadline passes.
        }
    }
}
class FleetController implements NodeListener, SlobrokListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static final Logger logger = Logger.getLogger(FleetController.class.getName()); private final FleetControllerContext context; private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private final AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long lastMetricUpdateCycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<ClusterStateBundle> newStates = new ArrayList<>(); private final List<ClusterStateBundle> convergedStates = new ArrayList<>(); private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean inMasterMoratorium = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; 
private long tickStartTime = Long.MAX_VALUE; private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>(); private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private Set<String> configuredBucketSpaces = Collections.emptySet(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public FleetControllerOptions getOptions() { return options; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(FleetControllerContext context, Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) { context.log(logger, Level.INFO, "Created"); this.context = context; this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler()); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( 
"^/$", new LegacyIndexPageRequestHandler(timer, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController create(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { var context = new FleetControllerContextImpl(options); var timer = new RealTimer(); var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName); var log = new EventLog(timer, metricUpdater); var cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution); var stateGatherer = new NodeStateGatherer(timer, timer, log); var communicator = new RPCCommunicator( RPCCommunicator.createRealSupervisor(), timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress, timer); var lookUp = new SlobrokClient(context, timer); var stateGenerator = new StateChangeHandler(context, timer, log); var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer); var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer); var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return isMaster; 
} } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public ClusterStateBundle getClusterStateBundle() { synchronized (monitor) { return systemStateBroadcaster.getClusterStateBundle(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { context.log(logger, Level.FINE, "Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. */ public void addSystemStateListener(SystemStateListener listener) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) { throw new NullPointerException("Cluster state should never be null at this point"); } listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state))); ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged(); if (convergedState != null) { listener.handleStateConvergedInCluster(convergedState); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { context.log(logger, Level.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } 
runner.join(); } context.log(logger, Level.INFO, "FleetController done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(databaseContext); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options) { var newId = FleetControllerId.fromOptions(options); synchronized(monitor) { assert newId.equals(context.id()); context.log(logger, Level.INFO, "FleetController has new options"); nextOptions = options.clone(); monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleRemovedNode(Node node) { verifyInControllerThread(); wantedStateChanged = true; } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo); stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo); } private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) { if (!options.clusterFeedBlockEnabled) { 
return; } var calc = createResourceExhaustionCalculator(); var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo); var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo); if (!previouslyExhausted.equals(nowExhausted)) { context.log(logger, Level.FINE, () -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s", previouslyExhausted, nowExhausted)); stateChangeHandler.setStateChangedFlag(); } } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } @Override public void handleNewPublishedState(ClusterStateBundle stateBundle) { verifyInControllerThread(); ClusterState baselineState = stateBundle.getBaselineClusterState(); newStates.add(stateBundle); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; systemStateBroadcaster.handleNewClusterStates(stateBundle); if (isMaster) { storeClusterStateMetaDataToZooKeeper(stateBundle); } } private boolean maybePublishOldMetrics() { verifyInControllerThread(); if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) { ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle(); ClusterState baselineState = stateBundle.getBaselineClusterState(); metricUpdater.updateClusterStateMetrics(cluster, baselineState, 
ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; return true; } else { return false; } } private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) { try { database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion()); database.saveLatestClusterStateBundle(databaseContext, stateBundle); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indices of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); context.log(logger, Level.FINEST, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. 
*/ public void lostDatabaseConnection() { verifyInControllerThread(); boolean wasMaster = isMaster; masterElectionHandler.lostDatabaseConnection(); if (wasMaster) { dropLeadershipState(); metricUpdater.updateMasterState(false); } } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); var currentBundle = stateVersionTracker.getVersionedClusterStateBundle(); context.log(logger, Level.FINE, () -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, dbContext); convergedStates.add(currentBundle); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) { return true; } } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() { verifyInControllerThread(); selfTerminateIfConfiguredNodeIndexHasChanged(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } configuredBucketSpaces = Set.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()); stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio); communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) { ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); } eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes, databaseContext.getNodeStateUpdateListener()); database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty()); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ 
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { context.log(logger, Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort + ". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); } private void selfTerminateIfConfiguredNodeIndexHasChanged() { var newId = new FleetControllerId(options.clusterName, options.fleetControllerIndex); if (!newId.equals(context.id())) { context.log(logger, Level.WARNING, context.id() + " got new configuration for " + newId + ". 
We do not support doing this live; " + "immediately exiting now to force new configuration"); prepareShutdownEdge(); System.exit(1); } } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; final String hiddenMessage; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); hiddenMessage = ""; } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTraceAsString(e); context.log(logger, Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = metricUpdater.forWork("doNextZooKeeperTask", () -> database.doNextZooKeeperTask(databaseContext)); didWork |= metricUpdater.forWork("updateMasterElectionState", this::updateMasterElectionState); didWork |= 
metricUpdater.forWork("handleLeadershipEdgeTransitions", this::handleLeadershipEdgeTransitions); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("stateGatherer-processResponses", () -> stateGatherer.processResponses(this)); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("systemStateBroadcaster-processResponses", systemStateBroadcaster::processResponses); if ( ! isRunning()) { return; } if (isMaster) { didWork |= metricUpdater.forWork("broadcastClusterStateToEligibleNodes", this::broadcastClusterStateToEligibleNodes); systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("processAnyPendingStatusPageRequest", this::processAnyPendingStatusPageRequest); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= metricUpdater.forWork("handleRpcRequests", () -> rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this)); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask); didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks); didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } monitor.wait(didWork || waitingForCycle ? 1 : options.cycleWaitTime); if ( ! 
isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw new RuntimeException(e); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to watch master election: " + e); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { context.log(logger, Level.SEVERE, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() { if (database.hasPendingClusterStateMetaDataStore()) { context.log(logger, Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores"); return false; } boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (inMasterMoratorium) { context.log(logger, Level.INFO, currentTime < firstAllowedStateBroadcast ? 
"Master moratorium complete: all nodes have reported in" : "Master moratorium complete: timed out waiting for all nodes to report in"); firstAllowedStateBroadcast = currentTime; inMasterMoratorium = false; } sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired( databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf()); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator); return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : newStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleNewPublishedState(stateBundle); } } newStates.clear(); } } if ( ! convergedStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : convergedStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleStateConvergedInCluster(stateBundle); } } convergedStates.clear(); } } } private boolean processNextQueuedRemoteTask() { metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size()); RemoteClusterControllerTask task = remoteTasks.poll(); if (task == null) { return false; } final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext(); context.log(logger, Level.FINEST, () -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(taskContext); if (taskMayBeCompletedImmediately(task)) { context.log(logger, Level.FINEST, () -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { context.log(logger, Level.FINEST, () -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); 
tasksPendingStateRecompute.add(task);
    }
    return true;
}

/** A task completes immediately unless it requires a version ACK, has not failed, and we are master. */
private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
    return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster);
}

/** Builds the context handed to remote tasks: cluster, current/published state, master info and listeners. */
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
    context.cluster = cluster;
    context.currentConsolidatedState = consolidatedClusterState();
    context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    // Snapshot-style view of this controller's master election status.
    context.masterInfo = new MasterInterface() {
        @Override public boolean isMaster() { return isMaster; }
        @Override public Integer getMaster() { return masterElectionHandler.getMaster(); }
        @Override public boolean inMasterMoratorium() { return inMasterMoratorium; }
    };
    context.nodeListener = this;
    context.slobrokListener = this;
    return context;
}

/** Which acked version counts depends on whether the bundle uses two-phase (deferred) activation. */
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
    return bundle.deferredActivation()
            ? nodeInfo.getClusterStateVersionActivationAcked()
            : nodeInfo.getClusterStateVersionBundleAcknowledged();
}

/** Lists nodes whose effective acked state version is still below the given version. */
private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
    var bundle = systemStateBroadcaster.getClusterStateBundle();
    if (bundle == null) {
        return List.of();
    }
    return cluster.getNodeInfos().stream().
            filter(n -> effectiveActivatedStateVersion(n, bundle) < version).
            map(NodeInfo::getNode).
            collect(Collectors.toList());
}

/** Joins at most {@code limit} elements, appending a "(... and N more)" suffix when truncated. */
private static <E> String stringifyListWithLimits(List<E> list, int limit) {
    if (list.size() > limit) {
        var sub = list.subList(0, limit);
        return String.format("%s (... and %d more)",
                sub.stream().map(E::toString).collect(Collectors.joining(", ")),
                list.size() - limit);
    } else {
        return list.stream().map(E::toString).collect(Collectors.joining(", "));
    }
}

/** Human-readable summary of nodes lagging behind {@code taskConvergeVersion}; empty string if none. */
private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
    var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
    if (nodes.isEmpty()) {
        return "";
    }
    return String.format("the following nodes have not converged to at least version %d: %s",
            taskConvergeVersion,
            stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}

/**
 * Completes (or fails, on deadline) queued version-dependent tasks whose minimum version has
 * been acknowledged cluster-wide. The queue is ordered, so processing stops at the first task
 * that is neither satisfied nor expired.
 *
 * @return true if at least one task was removed from the queue
 */
private boolean completeSatisfiedVersionDependentTasks() {
    int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
    long queueSizeBefore = taskCompletionQueue.size();
    final long now = timer.getCurrentTimeInMillis();
    while (!taskCompletionQueue.isEmpty()) {
        VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
        if (publishedVersion >= taskCompletion.getMinimumVersion()) {
            context.log(logger, Level.FINE, () -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                    taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
            var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
            context.log(logger, Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
                    taskCompletion.getTask().getClass().getName(), details));
            taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else {
            break;
        }
    }
    return (taskCompletionQueue.size() != queueSizeBefore);
}

/**
 * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
    final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
    if (publishedState.getClusterState() == State.UP) {
        return publishedState;
    }
    // Cluster is not UP: fall back to the latest candidate state, keeping the published version.
    final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    current.setVersion(publishedState.getVersion());
    return current;
}

/*
 System test observations:
 - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
 - long time before content node state convergence (though this seems to be the case for legacy impl as well)
 */
/**
 * Re-synchronizes locally cached state: wanted states/timestamps (non-masters, periodically),
 * slobrok node lookup, node state polling, timer watching and state recomputation.
 *
 * @return true if any sub-step reports having done work
 */
private boolean resyncLocallyCachedState() {
    boolean didWork = false;
    // Every 100th cycle, non-masters re-read wanted states and start timestamps from ZooKeeper.
    if ( ! isMaster && cycleCount % 100 == 0) {
        didWork = metricUpdater.forWork("loadWantedStates", () -> database.loadWantedStates(databaseContext));
        didWork |= metricUpdater.forWork("loadStartTimestamps", () -> database.loadStartTimestamps(cluster));
    }
    didWork |= metricUpdater.forWork("updateCluster", () -> nodeLookup.updateCluster(cluster, this));
    didWork |= metricUpdater.forWork("sendMessages", () -> stateGatherer.sendMessages(cluster, communicator, this));
    didWork |= metricUpdater.forWork(
            "watchTimers",
            () -> stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this));
    didWork |= metricUpdater.forWork("recomputeClusterStateIfRequired", this::recomputeClusterStateIfRequired);
    if ( ! isStateGatherer) {
        if ( !
isMaster) {
            // Becoming a state gatherer (master candidate): log and resync version info from ZooKeeper.
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION,
                    "This node just became node state gatherer as we are fleetcontroller master candidate.",
                    timer.getCurrentTimeInMillis()));
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            stateChangeHandler.setStateChangedFlag();
        }
    }
    isStateGatherer = true;
    return didWork;
}

/** Pushes a newly computed candidate state bundle to all registered listeners. */
private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
    systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle));
}

/** True once the broadcast moratorium has elapsed or every node has reported its state. */
private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
    return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported();
}

/**
 * Recomputes the candidate cluster state if anything may have changed, and promotes it to the
 * published versioned state when it differs enough (or a new ZooKeeper version arrived).
 *
 * @return true if a new versioned state was published
 */
private boolean recomputeClusterStateIfRequired() {
    boolean stateWasChanged = false;
    if (mustRecomputeCandidateClusterState()) {
        stateChangeHandler.unsetStateChangedFlag();
        final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
        // Derive per-bucket-space states and feed-block status for the candidate bundle.
        final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
                .bucketSpaces(configuredBucketSpaces)
                .stateDeriver(createBucketSpaceStateDeriver())
                .deferredActivation(options.enableTwoPhaseClusterStateActivation)
                .feedBlock(createResourceExhaustionCalculator()
                        .inferContentClusterFeedBlockOrNull(cluster.getNodeInfos()))
                .deriveAndBuild();
        stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
        invokeCandidateStateListeners(candidateBundle);
        final long timeNowMs = timer.getCurrentTimeInMillis();
        if (hasPassedFirstStateBroadcastTimePoint(timeNowMs)
                && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                    || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) {
            final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();
            stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
            emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
            stateWasChanged = true;
        }
    }
    /*
     * This works transparently for tasks that end up changing the current cluster state (i.e.
     * requiring a new state to be published) and for those whose changes are no-ops (because
     * the changes they request are already part of the current state). In the former case the
     * tasks will depend on the version that was generated based upon them. In the latter case
     * the tasks will depend on the version that is already published (or in the process of
     * being published).
     */
    scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
    return stateWasChanged;
}

/** Chooses the bucket-space state deriver based on whether the cluster has global document types. */
private ClusterStateDeriver createBucketSpaceStateDeriver() {
    if (options.clusterHasGlobalDocumentTypes) {
        return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
                createDefaultSpaceMaintenanceTransitionConstraint());
    } else {
        return createIdentityClonedBucketSpaceStateDeriver();
    }
}

/** Creates a feed-block calculator configured from the current options and candidate feed-block state. */
private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
    return new ResourceExhaustionCalculator(
            options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit,
            stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(),
            options.clusterFeedBlockNoiseLevel);
}

/** Deriver that simply clones the baseline state for every bucket space. */
private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
    return (state, space) -> state.clone();
}

/** Maintenance transition constraint anchored in the previously published default-space state. */
private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
    AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle()
            .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
    return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState());
}

/**
 * Move tasks that are dependent on the most recently generated state being published into
 * a completion queue with a dependency on the provided version argument. Once that version
 * has been ACKed by all distributors in the system, those tasks will be marked as completed.
 */
private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
    // All tasks scheduled in this round share the same wait deadline.
    final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        context.log(logger, Level.INFO, task + " will be completed at version " + completeAtVersion);
        taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
    }
    tasksPendingStateRecompute.clear();
}

/** Generates the current annotated cluster state from options, cluster info and observed distribution bits. */
private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
    // NOTE: "currentTimeInMilllis" (sic) is the externally defined method name.
    params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
          .cluster(cluster)
          .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(params);
}

/** Logs the per-node/per-edge event delta between two state bundles, then the state-applied events. */
private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                            final ClusterStateBundle toState,
                                            final long timeNowMs) {
    final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(cluster)
                    .fromState(fromState)
                    .toState(toState)
                    .currentTimeMs(timeNowMs)
                    .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
    for (Event event : deltaEvents) {
        eventLog.add(event, isMaster);
    }
    emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
}

/** Logs a "new cluster state" event and, if the distribution bit count changed, an additional event for that. */
private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
    eventLog.add(new ClusterEvent(
            ClusterEvent.Type.SYSTEMSTATE,
            "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
                    fromClusterState.getTextualDifference(toClusterState),
            timeNowMs), isMaster);
    if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() +
                        " to " + toClusterState.getDistributionBitCount(),
                timeNowMs), isMaster);
    }
}

/** True on the edge where this master may send its very first cluster state broadcast. */
private boolean atFirstClusterStateSendTimeEdge() {
    if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) {
        return false;
    }
    return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
}

/** A candidate recomputation is needed on state change, merge-completion change, or the first-send edge. */
private boolean mustRecomputeCandidateClusterState() {
    return stateChangeHandler.stateMayHaveChanged()
            || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()
            || atFirstClusterStateSendTimeEdge();
}

/**
 * Handles the edges of gaining or losing mastership: on gaining, reloads persisted state from
 * ZooKeeper and starts the broadcast moratorium; on losing, drops all leadership state.
 * Also persists wanted-state changes while master.
 *
 * @return true if any work was done
 */
private boolean handleLeadershipEdgeTransitions() {
    boolean didWork = false;
    if (masterElectionHandler.isMaster()) {
        if ( ! isMaster) {
            // Edge: we just became master.
            stateChangeHandler.setStateChangedFlag();
            systemStateBroadcaster.resetBroadcastedClusterStateBundle();
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
            database.loadStartTimestamps(cluster);
            database.loadWantedStates(databaseContext);
            context.log(logger, Level.INFO, () -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
            stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION,
                    "This node just became fleetcontroller master. Bumped version to " +
                            stateVersionTracker.getCurrentVersion() + " to be in line.",
                    timer.getCurrentTimeInMillis()));
            long currentTime = timer.getCurrentTimeInMillis();
            firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
            isMaster = true;
            inMasterMoratorium = true;
            context.log(logger, Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be " +
                    options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
            didWork = true;
        }
        if (wantedStateChanged) {
            didWork |= database.saveWantedStates(databaseContext);
            wantedStateChanged = false;
        }
    } else {
        dropLeadershipState();
    }
    metricUpdater.updateMasterState(isMaster);
    return didWork;
}

/** Clears all master-only state; fails any tasks that were waiting on version ACKs. */
private void dropLeadershipState() {
    if (isMaster) {
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION,
                "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
        firstAllowedStateBroadcast = Long.MAX_VALUE;
        failAllVersionDependentTasks();
    }
    wantedStateChanged = false;
    isMaster = false;
    inMasterMoratorium = false;
}

/** Main tick loop; runs until stopped. Any uncaught Throwable is fatal and exits the process. */
@Override
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    context.log(logger, Level.INFO, "Starting tick loop");
    try {
        processingCycle = true;
        while (isRunning()) {
            tick();
        }
        context.log(logger, Level.INFO, "Tick loop stopped");
    } catch (InterruptedException e) {
        context.log(logger, Level.INFO, "Event thread stopped by interrupt exception: ", e);
    } catch (Throwable t) {
        t.printStackTrace();
        context.log(logger, Level.SEVERE, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running.set(false); }
        System.exit(1);
    } finally {
        prepareShutdownEdge();
    }
}

/** Marks the controller stopped, fails pending version-dependent tasks and wakes all waiters. */
private void prepareShutdownEdge() {
    running.set(false);
    failAllVersionDependentTasks();
    synchronized (monitor) { monitor.notifyAll(); }
}

// Context object exposing this controller and its cluster/listeners to the database layer.
public DatabaseHandler.DatabaseContext databaseContext = new DatabaseHandler.DatabaseContext() {
    @Override
    public ContentCluster getCluster() { return
cluster; }
    @Override
    public FleetController getFleetController() { return FleetController.this; }
    @Override
    public SlobrokListener getNodeAddedOrRemovedListener() { return FleetController.this; }
    @Override
    public NodeListener getNodeStateUpdateListener() { return FleetController.this; }
};

/**
 * Blocks until at least one full tick cycle has completed (two if one is currently in
 * progress), or throws IllegalStateException on timeout or controller shutdown.
 */
public void waitForCompleteCycle(Duration timeout) {
    Instant endTime = Instant.now().plus(timeout);
    synchronized (monitor) {
        // If a cycle is mid-flight, we need the one after it to be sure a full cycle ran.
        long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
        waitingForCycle = true;
        try {
            while (cycleCount < wantedCycle) {
                if (Instant.now().isAfter(endTime))
                    throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeout);
                if ( !isRunning() )
                    throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                try { monitor.wait(100); } catch (InterruptedException e) {}
            }
        } finally {
            waitingForCycle = false;
        }
    }
}

/**
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, Duration timeout) throws InterruptedException {
    Instant endTime = Instant.now().plus(timeout);
    synchronized (monitor) {
        while (true) {
            int distCount = 0, storCount = 0;
            for (NodeInfo info : cluster.getNodeInfos()) {
                if (!info.isRpcAddressOutdated()) {
                    if (info.isDistributor()) ++distCount;
                    else ++storCount;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) return;
            Duration remainingTime = Duration.between(Instant.now(), endTime);
            if (remainingTime.isNegative()) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount +
                        " storage nodes registered in slobrok within timeout of " + timeout + ". (Got " + distCount +
                        " distributors and " + storCount + " storage nodes)");
            }
            monitor.wait(10);
        }
    }
}

/** True while the ZooKeeper database connection is open. */
public boolean hasZookeeperConnection() { return !database.isClosed(); }

/** Number of slobrok mirror updates seen so far (test/diagnostics accessor). */
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }

public ContentCluster getCluster() { return cluster; }

public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }

public EventLog getEventLog() { return eventLog; }

}
// Central cluster controller: runs the tick loop that gathers node states, computes and
// publishes cluster states, and participates in ZooKeeper-based master election.
class FleetController implements NodeListener, SlobrokListener, SystemStateListener,
                                 Runnable, RemoteClusterControllerTaskScheduler {

    private static final Logger logger = Logger.getLogger(FleetController.class.getName());

    private final FleetControllerContext context;
    private final Timer timer;
    // The timer object doubles as the monitor guarding shared state (see constructor).
    private final Object monitor;
    private final EventLog eventLog;
    private final NodeLookup nodeLookup;
    private final ContentCluster cluster;
    private final Communicator communicator;
    private final NodeStateGatherer stateGatherer;
    private final StateChangeHandler stateChangeHandler;
    private final SystemStateBroadcaster systemStateBroadcaster;
    private final StateVersionTracker stateVersionTracker;
    private final StatusPageServerInterface statusPageServer;
    private final RpcServer rpcServer;
    private final DatabaseHandler database;
    private final MasterElectionHandler masterElectionHandler;
    private Thread runner = null;
    private final AtomicBoolean running = new AtomicBoolean(true);
    // Active options; nextOptions holds a pending reconfiguration applied at the next tick.
    private FleetControllerOptions options;
    private FleetControllerOptions nextOptions;
    private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>();
    private boolean processingCycle = false;
    private boolean wantedStateChanged = false;
    private long cycleCount = 0;
    private long lastMetricUpdateCycleCount = 0;
    private long nextStateSendTime = 0;
    private Long controllerThreadId = null;
    private boolean waitingForCycle = false;
    private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
    // Bundles published/converged since listeners were last notified.
    private final List<ClusterStateBundle> newStates = new ArrayList<>();
    private final List<ClusterStateBundle> convergedStates = new ArrayList<>();
    private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
    private final MetricUpdater metricUpdater;

    private boolean isMaster = false;
    private boolean inMasterMoratorium = false;
    private boolean isStateGatherer = false;
    // MAX_VALUE means "broadcasting not yet allowed"; set on becoming master.
    private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE;

// Tasks waiting for the next state recomputation, then for cluster-wide version ACKs.
private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
// Completion queue is ordered on increasing cluster state version.
private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();

private Set<String> configuredBucketSpaces = Collections.emptySet();

private final RunDataExtractor dataExtractor = new RunDataExtractor() {
    @Override public FleetControllerOptions getOptions() { return options; }
    @Override public ContentCluster getCluster() { return cluster; }
};

/** Wires together all collaborating components and registers the status page handlers. */
public FleetController(FleetControllerContext context,
                       Timer timer,
                       EventLog eventLog,
                       ContentCluster cluster,
                       NodeStateGatherer nodeStateGatherer,
                       Communicator communicator,
                       StatusPageServerInterface statusPage,
                       RpcServer server,
                       NodeLookup nodeLookup,
                       DatabaseHandler database,
                       StateChangeHandler stateChangeHandler,
                       SystemStateBroadcaster systemStateBroadcaster,
                       MasterElectionHandler masterElectionHandler,
                       MetricUpdater metricUpdater,
                       FleetControllerOptions options) {
    context.log(logger, Level.INFO, "Created");
    this.context = context;
    this.timer = timer;
    // The timer instance is reused as the shared monitor object.
    this.monitor = timer;
    this.eventLog = eventLog;
    this.options = options;
    this.nodeLookup = nodeLookup;
    this.cluster = cluster;
    this.communicator = communicator;
    this.database = database;
    this.stateGatherer = nodeStateGatherer;
    this.stateChangeHandler = stateChangeHandler;
    this.systemStateBroadcaster = systemStateBroadcaster;
    this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio);
    this.metricUpdater = metricUpdater;
    this.statusPageServer = statusPage;
    this.rpcServer = server;
    this.masterElectionHandler = masterElectionHandler;
    this.statusRequestRouter.addHandler(
            "^/node=([a-z]+)\\.(\\d+)$",
            new LegacyNodePageRequestHandler(timer, eventLog, cluster));
    this.statusRequestRouter.addHandler(
            "^/state.*",
            new NodeHealthRequestHandler());
    this.statusRequestRouter.addHandler(
            "^/clusterstate",
            new ClusterStateRequestHandler(stateVersionTracker));
    this.statusRequestRouter.addHandler(
"^/$",
            new LegacyIndexPageRequestHandler(timer, cluster, masterElectionHandler,
                                              stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor));

    propagateOptions();
}

/** Factory that builds a fully wired FleetController with real (production) components and starts it. */
public static FleetController create(FleetControllerOptions options,
                                     StatusPageServerInterface statusPageServer,
                                     MetricReporter metricReporter) throws Exception {
    var context = new FleetControllerContextImpl(options);
    var timer = new RealTimer();
    var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName);
    var log = new EventLog(timer, metricUpdater);
    var cluster = new ContentCluster(
            options.clusterName,
            options.nodes,
            options.storageDistribution);
    var stateGatherer = new NodeStateGatherer(timer, timer, log);
    var communicator = new RPCCommunicator(
            RPCCommunicator.createRealSupervisor(),
            timer,
            options.fleetControllerIndex,
            options.nodeStateRequestTimeoutMS,
            options.nodeStateRequestTimeoutEarliestPercentage,
            options.nodeStateRequestTimeoutLatestPercentage,
            options.nodeStateRequestRoundTripTimeMaxSeconds);
    var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress, timer);
    var lookUp = new SlobrokClient(context, timer);
    var stateGenerator = new StateChangeHandler(context, timer, log);
    var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer);
    var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
    // Note: no RPC server is passed here (null).
    var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator, statusPageServer,
                                         null, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
    controller.start();
    return controller;
}

/** Starts the tick loop on a dedicated thread. */
public void start() {
    runner = new Thread(this);
    runner.start();
}

public Object getMonitor() { return monitor; }

public boolean isRunning() { return running.get(); }

public boolean isMaster() {
    synchronized (monitor) {
        return isMaster;
}
}

public ClusterState getClusterState() {
    synchronized (monitor) { return systemStateBroadcaster.getClusterState(); }
}

public ClusterStateBundle getClusterStateBundle() {
    synchronized (monitor) { return systemStateBroadcaster.getClusterStateBundle(); }
}

/** Queues a remote task for execution in the controller thread's tick loop. */
public void schedule(RemoteClusterControllerTask task) {
    synchronized (monitor) {
        context.log(logger, Level.FINE, "Scheduled remote task " + task.getClass().getName() + " for execution");
        remoteTasks.add(task);
    }
}

/** Used for unit testing. */
public void addSystemStateListener(SystemStateListener listener) {
    systemStateListeners.add(listener);
    // Immediately replay current published (and, if any, converged) state to the new listener.
    com.yahoo.vdslib.state.ClusterState state = getSystemState();
    if (state == null) {
        throw new NullPointerException("Cluster state should never be null at this point");
    }
    listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
    ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
    if (convergedState != null) {
        listener.handleStateConvergedInCluster(convergedState);
    }
}

public FleetControllerOptions getOptions() {
    synchronized (monitor) { return options.clone(); }
}

public NodeState getReportedNodeState(Node n) {
    synchronized (monitor) {
        NodeInfo node = cluster.getNodeInfo(n);
        if (node == null) {
            throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
        }
        return node.getReportedState();
    }
}

public NodeState getWantedNodeState(Node n) {
    synchronized (monitor) { return cluster.getNodeInfo(n).getWantedState(); }
}

public com.yahoo.vdslib.state.ClusterState getSystemState() {
    synchronized (monitor) { return stateVersionTracker.getVersionedClusterState(); }
}

public int getRpcPort() { return rpcServer.getPort(); }

/** Stops the tick loop, joins its thread and shuts down all sub-components. */
public void shutdown() throws InterruptedException, java.io.IOException {
    if (runner != null && isRunning()) {
        context.log(logger, Level.INFO, "Joining event thread.");
        running.set(false);
        synchronized (monitor) { monitor.notifyAll(); }
runner.join();
    }
    context.log(logger, Level.INFO, "FleetController done shutting down event thread.");
    // The caller's thread now owns controller-thread-only operations.
    controllerThreadId = Thread.currentThread().getId();
    database.shutdown(databaseContext);

    if (statusPageServer != null) {
        statusPageServer.shutdown();
    }
    if (rpcServer != null) {
        rpcServer.shutdown();
    }
    communicator.shutdown();
    nodeLookup.shutdown();
}

/** Stages a new options object; it is applied by the tick loop via switchToNewConfig(). */
public void updateOptions(FleetControllerOptions options) {
    var newId = FleetControllerId.fromOptions(options);
    synchronized (monitor) {
        assert newId.equals(context.id());
        context.log(logger, Level.INFO, "FleetController has new options");
        nextOptions = options.clone();
        monitor.notifyAll();
    }
}

/** Guards controller-thread-only methods against being called from other threads. */
private void verifyInControllerThread() {
    if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
        throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
    }
}

private ClusterState latestCandidateClusterState() {
    return stateVersionTracker.getLatestCandidateState().getClusterState();
}

@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}

@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    wantedStateChanged = true;
    stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}

@Override
public void handleRemovedNode(Node node) {
    verifyInControllerThread();
    // Prune orphaned wanted states when a node disappears from configuration.
    wantedStateChanged = true;
}

@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
    verifyInControllerThread();
    triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
    stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
}

/** Flags a state recomputation when a node's resource exhaustion set changes (feed block edges). */
private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) {
    if (!options.clusterFeedBlockEnabled) {
return;
    }
    var calc = createResourceExhaustionCalculator();
    // Compare exhaustion state computed from the old vs. the new host info.
    var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo);
    var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo);
    if (!previouslyExhausted.equals(nowExhausted)) {
        context.log(logger, Level.FINE, () -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s",
                previouslyExhausted, nowExhausted));
        stateChangeHandler.setStateChangedFlag();
    }
}

@Override
public void handleNewNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewNode(node);
}

@Override
public void handleMissingNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}

@Override
public void handleNewRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewRpcAddress(node);
}

@Override
public void handleReturnedRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleReturnedRpcAddress(node);
}

/** Records a newly published state bundle, updates metrics and (if master) persists it to ZooKeeper. */
@Override
public void handleNewPublishedState(ClusterStateBundle stateBundle) {
    verifyInControllerThread();
    ClusterState baselineState = stateBundle.getBaselineClusterState();
    newStates.add(stateBundle);
    metricUpdater.updateClusterStateMetrics(cluster, baselineState,
            ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
    lastMetricUpdateCycleCount = cycleCount;
    systemStateBroadcaster.handleNewClusterStates(stateBundle);
    // Only a master should write cluster state changes to ZooKeeper.
    if (isMaster) {
        storeClusterStateMetaDataToZooKeeper(stateBundle);
    }
}

/** Re-publishes cluster state metrics if none have been published for more than 300 cycles. */
private boolean maybePublishOldMetrics() {
    verifyInControllerThread();
    if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) {
        ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle();
        ClusterState baselineState = stateBundle.getBaselineClusterState();
        metricUpdater.updateClusterStateMetrics(cluster, baselineState,
ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; return true; } else { return false; } } private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) { try { database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion()); database.saveLatestClusterStateBundle(databaseContext, stateBundle); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indices of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); context.log(logger, Level.FINEST, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. 
*/ public void lostDatabaseConnection() { verifyInControllerThread(); boolean wasMaster = isMaster; masterElectionHandler.lostDatabaseConnection(); if (wasMaster) { dropLeadershipState(); metricUpdater.updateMasterState(false); } } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); var currentBundle = stateVersionTracker.getVersionedClusterStateBundle(); context.log(logger, Level.FINE, () -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, dbContext); convergedStates.add(currentBundle); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;
    for (ConfiguredNode node : newNodes) {
        // Same membership, but a flipped retired flag still counts as a change.
        if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) {
            return true;
        }
    }
    return false;
}

/** This is called when the options field has been set to a new set of options */
private void propagateOptions() {
    verifyInControllerThread();
    selfTerminateIfConfiguredNodeIndexHasChanged();
    if (changesConfiguredNodeSet(options.nodes)) {
        // Reset generation so the slobrok view is rebuilt for the new node set.
        cluster.setSlobrokGenerationCount(0);
    }
    configuredBucketSpaces = Set.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace());
    stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);
    communicator.propagateOptions(options);
    if (nodeLookup instanceof SlobrokClient) {
        ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
    }
    eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
    cluster.setPollingFrequency(options.statePollingFrequency);
    cluster.setDistribution(options.storageDistribution);
    cluster.setNodes(options.nodes, databaseContext.getNodeStateUpdateListener());
    database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext);
    database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext);
    stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
    stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
    stateChangeHandler.reconfigureFromOptions(options);
    stateChangeHandler.setStateChangedFlag(); // always recompute the cluster state after reconfig
    masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
    masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
    masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty());
    if (rpcServer != null) {
        rpcServer.setMasterElectionHandler(masterElectionHandler);
        try {
            rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
        } catch (ListenFailedException e) {
            context.log(logger, Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort + ". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        } catch (Exception e) {
            context.log(logger, Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
        }
    }
    if (statusPageServer != null) {
        try {
            statusPageServer.setPort(options.httpPort);
        } catch (Exception e) {
            context.log(logger, Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
        }
    }
    long currentTime = timer.getCurrentTimeInMillis();
    // Never push the next broadcast further out than the (possibly shortened) configured minimum.
    nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
}

// The fleet controller index is part of this node's identity; changing it live is unsupported,
// so log, run the shutdown edge and exit to force a restart with the new configuration.
private void selfTerminateIfConfiguredNodeIndexHasChanged() {
    var newId = new FleetControllerId(options.clusterName, options.fleetControllerIndex);
    if (!newId.equals(context.id())) {
        context.log(logger, Level.WARNING, context.id() + " got new configuration for " + newId + ". We do not support doing this live; " +
                "immediately exiting now to force new configuration");
        prepareShutdownEdge();
        System.exit(1);
    }
}

/** Resolves a handler for the HTTP request and renders its page, or an error page on failure. */
public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
    verifyInControllerThread();
    StatusPageResponse.ResponseCode responseCode;
    String message;
    final String hiddenMessage; // stack trace etc.; written only into the hidden HTML footer
    try {
        StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
        if (handler == null) {
            throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
        }
        return handler.handle(httpRequest);
    } catch (FileNotFoundException e) {
        responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
        message = e.getMessage();
        hiddenMessage = "";
    } catch (Exception e) {
        responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
        message = "Internal Server Error";
        hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
        context.log(logger, Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage);
    }
    TimeZone tz = TimeZone.getTimeZone("UTC");
    long currentTime = timer.getCurrentTimeInMillis();
    StatusPageResponse response = new StatusPageResponse();
    StringBuilder content = new StringBuilder();
    response.setContentType("text/html");
    response.setResponseCode(responseCode);
    content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
    content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
    response.writeHtmlHeader(content, message);
    response.writeHtmlFooter(content, hiddenMessage);
    response.writeContent(content.toString());
    return response;
}

/**
 * One iteration of the fleet controller's main loop: ZooKeeper housekeeping, master election,
 * node state gathering, state broadcasting, status/RPC serving and remote task processing.
 * Returns early (releasing the monitor) whenever the controller has been stopped.
 */
public void tick() throws Exception {
    synchronized (monitor) {
        boolean didWork;
        didWork = metricUpdater.forWork("doNextZooKeeperTask", () -> database.doNextZooKeeperTask(databaseContext));
        didWork |= metricUpdater.forWork("updateMasterElectionState", this::updateMasterElectionState);
        didWork |=
metricUpdater.forWork("handleLeadershipEdgeTransitions", this::handleLeadershipEdgeTransitions);
        stateChangeHandler.setMaster(isMaster);
        if ( ! isRunning()) { return; }
        // Every phase below re-checks isRunning() so a shutdown aborts the tick promptly.
        didWork |= metricUpdater.forWork("stateGatherer-processResponses", () -> stateGatherer.processResponses(this));
        if ( ! isRunning()) { return; }
        if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
            didWork |= resyncLocallyCachedState();
        } else {
            stepDownAsStateGatherer();
        }
        if ( ! isRunning()) { return; }
        didWork |= metricUpdater.forWork("systemStateBroadcaster-processResponses", systemStateBroadcaster::processResponses);
        if ( ! isRunning()) { return; }
        if (isMaster) {
            didWork |= metricUpdater.forWork("broadcastClusterStateToEligibleNodes", this::broadcastClusterStateToEligibleNodes);
            systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
        }
        if ( ! isRunning()) { return; }
        didWork |= metricUpdater.forWork("processAnyPendingStatusPageRequest", this::processAnyPendingStatusPageRequest);
        if ( ! isRunning()) { return; }
        if (rpcServer != null) {
            didWork |= metricUpdater.forWork("handleRpcRequests", () -> rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this));
        }
        if ( ! isRunning()) { return; }
        didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask);
        didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks);
        didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics);
        processingCycle = false;
        ++cycleCount;
        long tickStopTime = timer.getCurrentTimeInMillis();
        if (tickStopTime >= tickStartTime) {
            metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
        }
        // Busy (or observed-by-test) cycles wait only 1 ms to stay responsive; idle cycles back off.
        monitor.wait(didWork || waitingForCycle ? 1 : options.cycleWaitTime);
        if ( ! isRunning()) { return; }
        tickStartTime = timer.getCurrentTimeInMillis();
        processingCycle = true;
        if (nextOptions != null) {
            switchToNewConfig();
        }
    }
    // Listener callbacks happen outside the monitor to avoid holding the lock in alien code.
    if (isRunning()) {
        propagateNewStatesToListeners();
    }
}

// Polls the master election state; InterruptedException is treated as fatal, other
// failures are logged as warnings and reported as "no work done".
private boolean updateMasterElectionState() {
    try {
        return masterElectionHandler.watchMasterElection(database, databaseContext);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (Exception e) {
        context.log(logger, Level.WARNING, "Failed to watch master election: " + e);
    }
    return false;
}

// Clears gathered node states (logging an event) when this node ceases to be a gatherer.
private void stepDownAsStateGatherer() {
    if (isStateGatherer) {
        cluster.clearStates();
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
    }
    isStateGatherer = false;
}

// Swaps in the queued options and propagates them; a propagation failure is logged, not rethrown.
private void switchToNewConfig() {
    options = nextOptions;
    nextOptions = null;
    try {
        propagateOptions();
    } catch (Exception e) {
        context.log(logger, Level.SEVERE, "Failed to handle new fleet controller config", e);
    }
}

// Serves at most one pending status page request per tick; returns whether one was handled.
private boolean processAnyPendingStatusPageRequest() {
    if (statusPageServer != null) {
        StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
        if (statusRequest != null) {
            statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
            return true;
        }
    }
    return false;
}

// Broadcasts a new state bundle and/or state activations when permitted; returns whether
// anything was sent. Gated on pending ZooKeeper writes, the master moratorium and the
// minimum interval between published states.
private boolean broadcastClusterStateToEligibleNodes() {
    if (database.hasPendingClusterStateMetaDataStore()) {
        context.log(logger, Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
        return false;
    }
    boolean sentAny = false;
    long currentTime = timer.getCurrentTimeInMillis();
    if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) {
        if (inMasterMoratorium) {
            context.log(logger, Level.INFO, currentTime < firstAllowedStateBroadcast ?
                    "Master moratorium complete: all nodes have reported in" :
                    "Master moratorium complete: timed out waiting for all nodes to report in");
            firstAllowedStateBroadcast = currentTime;
            inMasterMoratorium = false;
        }
        sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(
                databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf());
        if (sentAny) {
            nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
        }
    }
    sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
    return sentAny;
}

// Notifies listeners of newly published and newly converged state bundles, then clears both lists.
private void propagateNewStatesToListeners() {
    if ( ! newStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : newStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleNewPublishedState(stateBundle);
                }
            }
            newStates.clear();
        }
    }
    if ( ! convergedStates.isEmpty()) {
        synchronized (systemStateListeners) {
            for (ClusterStateBundle stateBundle : convergedStates) {
                for (SystemStateListener listener : systemStateListeners) {
                    listener.handleStateConvergedInCluster(stateBundle);
                }
            }
            convergedStates.clear();
        }
    }
}

// Dequeues and runs one remote task; tasks that must wait for a published state version
// are parked in tasksPendingStateRecompute instead of completing immediately.
private boolean processNextQueuedRemoteTask() {
    metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size());
    RemoteClusterControllerTask task = remoteTasks.poll();
    if (task == null) {
        return false;
    }
    final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext();
    context.log(logger, Level.FINEST, () -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
    task.doRemoteFleetControllerTask(taskContext);
    if (taskMayBeCompletedImmediately(task)) {
        context.log(logger, Level.FINEST, () -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
        task.notifyCompleted();
    } else {
        context.log(logger, Level.FINEST, () -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
tasksPendingStateRecompute.add(task);
    }
    return true;
}

// A task may complete right away unless it must wait for a state version ACK while we
// are a (non-failed) master.
private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
    return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster);
}

// Snapshot of controller state handed to remote tasks, including a master-info view.
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
    context.cluster = cluster;
    context.currentConsolidatedState = consolidatedClusterState();
    context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    context.masterInfo = new MasterInterface() {
        // The bare names below resolve to the enclosing FleetController's fields.
        @Override public boolean isMaster() { return isMaster; }
        @Override public Integer getMaster() { return masterElectionHandler.getMaster(); }
        @Override public boolean inMasterMoratorium() { return inMasterMoratorium; }
    };
    context.nodeListener = this;
    context.slobrokListener = this;
    return context;
}

// Which ACK counts as "activated" depends on the bundle's deferred-activation flag.
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
    return bundle.deferredActivation()
            ? nodeInfo.getClusterStateVersionActivationAcked()
            : nodeInfo.getClusterStateVersionBundleAcknowledged();
}

// Nodes that have not yet ACKed (or activated) at least the given state version.
private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
    var bundle = systemStateBroadcaster.getClusterStateBundle();
    if (bundle == null) {
        return List.of();
    }
    return cluster.getNodeInfos().stream().
            filter(n -> effectiveActivatedStateVersion(n, bundle) < version).
            map(NodeInfo::getNode).
            collect(Collectors.toList());
}

// Renders at most `limit` elements, appending "(... and N more)" for the remainder.
private static <E> String stringifyListWithLimits(List<E> list, int limit) {
    if (list.size() > limit) {
        var sub = list.subList(0, limit);
        return String.format("%s (... and %d more)",
                sub.stream().map(E::toString).collect(Collectors.joining(", ")),
                list.size() - limit);
    } else {
        return list.stream().map(E::toString).collect(Collectors.joining(", "));
    }
}

// Human-readable summary of nodes lagging behind the given version; "" when none do.
private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
    var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
    if (nodes.isEmpty()) {
        return "";
    }
    return String.format("the following nodes have not converged to at least version %d: %s",
            taskConvergeVersion,
            stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}

// Completes queued version-dependent tasks whose version is now ACKed cluster-wide, and
// fails with DEADLINE_EXCEEDED those whose wait deadline has passed. Stops at the first
// task that is neither satisfied nor expired (queue is processed front-to-back).
private boolean completeSatisfiedVersionDependentTasks() {
    int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
    long queueSizeBefore = taskCompletionQueue.size();
    final long now = timer.getCurrentTimeInMillis();
    while (!taskCompletionQueue.isEmpty()) {
        VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
        if (publishedVersion >= taskCompletion.getMinimumVersion()) {
            context.log(logger, Level.FINE, () -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                    taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
            var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
            context.log(logger, Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
                    taskCompletion.getTask().getClass().getName(), details));
            taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else {
            break;
        }
    }
    return (taskCompletionQueue.size() != queueSizeBefore);
}

/**
 * A "consolidated" cluster state
 * is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
ClusterState consolidatedClusterState() {
    final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
    if (publishedState.getClusterState() == State.UP) {
        return publishedState;
    }
    // Cluster is down: overlay the latest candidate's node info while keeping the published version.
    final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
    current.setVersion(publishedState.getVersion());
    return current;
}

/*
 System test observations:
 - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
 - long time before content node state convergence (though this seems to be the case for legacy impl as well)
 */
private boolean resyncLocallyCachedState() {
    boolean didWork = false;
    // When not master, periodically refresh wanted states and start timestamps from ZooKeeper.
    if ( ! isMaster && cycleCount % 100 == 0) {
        didWork = metricUpdater.forWork("loadWantedStates", () -> database.loadWantedStates(databaseContext));
        didWork |= metricUpdater.forWork("loadStartTimestamps", () -> database.loadStartTimestamps(cluster));
    }
    didWork |= metricUpdater.forWork("updateCluster", () -> nodeLookup.updateCluster(cluster, this));
    didWork |= metricUpdater.forWork("sendMessages", () -> stateGatherer.sendMessages(cluster, communicator, this));
    didWork |= metricUpdater.forWork(
            "watchTimers",
            () -> stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this));
    didWork |= metricUpdater.forWork("recomputeClusterStateIfRequired", this::recomputeClusterStateIfRequired);
    if ( ! isStateGatherer) {
        if ( ! isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
            // Update versions to use so what is shown is closer to what is reality on the master.
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            stateChangeHandler.setStateChangedFlag();
        }
    }
    isStateGatherer = true;
    return didWork;
}

private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
    systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle));
}

private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
    return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported();
}

// Recomputes the candidate cluster state when anything may have changed, publishing it as a
// new versioned state if it differs enough (or ZooKeeper handed us a new version). Always
// schedules pending version-dependent tasks against the current version afterwards.
private boolean recomputeClusterStateIfRequired() {
    boolean stateWasChanged = false;
    if (mustRecomputeCandidateClusterState()) {
        stateChangeHandler.unsetStateChangedFlag();
        final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
        final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
                .bucketSpaces(configuredBucketSpaces)
                .stateDeriver(createBucketSpaceStateDeriver())
                .deferredActivation(options.enableTwoPhaseClusterStateActivation)
                .feedBlock(createResourceExhaustionCalculator()
                        .inferContentClusterFeedBlockOrNull(cluster.getNodeInfos()))
                .deriveAndBuild();
        stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
        invokeCandidateStateListeners(candidateBundle);
        final long timeNowMs = timer.getCurrentTimeInMillis();
        if (hasPassedFirstStateBroadcastTimePoint(timeNowMs)
                && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                    || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) {
            final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();
            stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
            emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
            handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
            stateWasChanged = true;
        }
    }
    /*
     * This works transparently for tasks that end up changing the current cluster state (i.e.
     * requiring a new state to be published) and for those whose changes are no-ops (because
     * the changes they request are already part of the current state). In the former case the
     * tasks will depend on the version that was generated based upon them. In the latter case
     * the tasks will depend on the version that is already published (or in the process of
     * being published).
     */
    scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
    return stateWasChanged;
}

// Clusters with global document types need maintenance-mode derivation while global merges
// are pending; otherwise a plain clone of the baseline state per bucket space suffices.
private ClusterStateDeriver createBucketSpaceStateDeriver() {
    if (options.clusterHasGlobalDocumentTypes) {
        return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
                createDefaultSpaceMaintenanceTransitionConstraint());
    } else {
        return createIdentityClonedBucketSpaceStateDeriver();
    }
}

private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
    return new ResourceExhaustionCalculator(
            options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit,
            stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(),
            options.clusterFeedBlockNoiseLevel);
}

private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
    return (state, space) -> state.clone();
}

private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
    AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle()
            .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
    return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState());
}

/**
 * Move tasks that are dependent on the most recently generated state being published into
 * a completion queue with a
 * dependency on the provided version argument. Once that version
 * has been ACKed by all distributors in the system, those tasks will be marked as completed.
 */
private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
    final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
    for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
        context.log(logger, Level.INFO, task + " will be completed at version " + completeAtVersion);
        taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
    }
    tasksPendingStateRecompute.clear();
}

private AnnotatedClusterState computeCurrentAnnotatedState() {
    ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
    // Note: "currentTimeInMilllis" (sic) is the actual spelling of the builder method.
    params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
          .cluster(cluster)
          .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
    return ClusterStateGenerator.generatedStateFrom(params);
}

// Emits events for the delta between two published bundles, then the state-applied events.
private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                            final ClusterStateBundle toState,
                                            final long timeNowMs) {
    final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(cluster)
                    .fromState(fromState)
                    .toState(toState)
                    .currentTimeMs(timeNowMs)
                    .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
    for (Event event : deltaEvents) {
        eventLog.add(event, isMaster);
    }
    emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
}

private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
    eventLog.add(new ClusterEvent(
            ClusterEvent.Type.SYSTEMSTATE,
            "New cluster state version " + toClusterState.getVersion() + ". Change from last: " + fromClusterState.getTextualDifference(toClusterState),
            timeNowMs), isMaster);
    if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(),
                timeNowMs), isMaster);
    }
}

// True only while we are master, have not yet broadcast any bundle, and are allowed to.
private boolean atFirstClusterStateSendTimeEdge() {
    if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) {
        return false;
    }
    return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
}

private boolean mustRecomputeCandidateClusterState() {
    return stateChangeHandler.stateMayHaveChanged()
            || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()
            || atFirstClusterStateSendTimeEdge();
}

// Handles becoming / ceasing to be master. On the up-edge, restores state from ZooKeeper
// and enters the broadcast moratorium; on the down-edge delegates to dropLeadershipState().
private boolean handleLeadershipEdgeTransitions() {
    boolean didWork = false;
    if (masterElectionHandler.isMaster()) {
        if ( ! isMaster) {
            stateChangeHandler.setStateChangedFlag();
            systemStateBroadcaster.resetBroadcastedClusterStateBundle();
            stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
            ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
            database.loadStartTimestamps(cluster);
            database.loadWantedStates(databaseContext);
            context.log(logger, Level.INFO, () -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
            stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                    + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
            long currentTime = timer.getCurrentTimeInMillis();
            firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
            isMaster = true;
            inMasterMoratorium = true;
            context.log(logger, Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be "
                    + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
            didWork = true;
        }
        if (wantedStateChanged) {
            didWork |= database.saveWantedStates(databaseContext);
            wantedStateChanged = false;
        }
    } else {
        dropLeadershipState();
    }
    metricUpdater.updateMasterState(isMaster);
    return didWork;
}

// Clears all master-only state; fails queued version-dependent tasks if we actually were master.
private void dropLeadershipState() {
    if (isMaster) {
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
        firstAllowedStateBroadcast = Long.MAX_VALUE;
        failAllVersionDependentTasks();
    }
    wantedStateChanged = false;
    isMaster = false;
    inMasterMoratorium = false;
}

/** Main loop: ticks until stopped; a fatal error logs, marks the controller stopped and exits. */
@Override
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    context.log(logger, Level.INFO, "Starting tick loop");
    try {
        processingCycle = true;
        while (isRunning()) {
            tick();
        }
        context.log(logger, Level.INFO, "Tick loop stopped");
    } catch (InterruptedException e) {
        context.log(logger, Level.INFO, "Event thread stopped by interrupt exception: ", e);
    } catch (Throwable t) {
        t.printStackTrace();
        context.log(logger, Level.SEVERE, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running.set(false); }
        System.exit(1);
    } finally {
        prepareShutdownEdge();
    }
}

// Marks the controller stopped, fails waiting tasks and wakes any threads blocked on monitor.
private void prepareShutdownEdge() {
    running.set(false);
    failAllVersionDependentTasks();
    synchronized (monitor) { monitor.notifyAll(); }
}

// Adapter giving DatabaseHandler access to this controller's cluster and listener callbacks.
public DatabaseHandler.DatabaseContext databaseContext = new DatabaseHandler.DatabaseContext() {
    @Override public ContentCluster getCluster() { return
cluster; }
    @Override public FleetController getFleetController() { return FleetController.this; }
    @Override public SlobrokListener getNodeAddedOrRemovedListener() { return FleetController.this; }
    @Override public NodeListener getNodeStateUpdateListener() { return FleetController.this; }
};

/** Test helper: blocks until at least one full tick cycle completes; throws on timeout or stop. */
public void waitForCompleteCycle(Duration timeout) {
    Instant endTime = Instant.now().plus(timeout);
    synchronized (monitor) {
        // If a cycle is in progress we need it plus one more full cycle to be certain.
        long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
        waitingForCycle = true;
        try {
            while (cycleCount < wantedCycle) {
                if (Instant.now().isAfter(endTime))
                    throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeout);
                if ( !isRunning() )
                    throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                try {
                    monitor.wait(100);
                } catch (InterruptedException e) {
                    // deliberately ignored: the loop re-checks the deadline and running flag
                }
            }
        } finally {
            waitingForCycle = false;
        }
    }
}

/**
 * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
 * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
 * live performance to remove a non-problem.
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, Duration timeout) throws InterruptedException {
    Instant endTime = Instant.now().plus(timeout);
    synchronized (monitor) {
        while (true) {
            int distCount = 0, storCount = 0;
            for (NodeInfo info : cluster.getNodeInfos()) {
                if (!info.isRpcAddressOutdated()) {
                    if (info.isDistributor()) ++distCount;
                    else ++storCount;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) return;
            if (Instant.now().isAfter(endTime)) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                        + " storage nodes registered in slobrok within timeout of " + timeout + ". (Got " + distCount
                        + " distributors and " + storCount + " storage nodes)");
            }
            monitor.wait(10);
        }
    }
}

public boolean hasZookeeperConnection() { return !database.isClosed(); }

public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }

public ContentCluster getCluster() { return cluster; }

public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }

public EventLog getEventLog() { return eventLog; }
}
Fix: add the missing space between the end of the notification text and the URL that follows it, so the rendered message does not fuse the sentence and the link into one token.
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
"Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" +
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> 
wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! 
controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> 
wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! 
controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
Prefer `Instant.now().isAfter(endTime)`, as used in the other timeout checks, for consistency and readability.
/**
 * Blocks until the given numbers of distributor and storage nodes have a current
 * (non-outdated) RPC address registered in slobrok, or until the timeout expires.
 * Polls under the shared monitor, sleeping 10 ms between checks.
 *
 * @param distNodeCount expected number of registered distributor nodes
 * @param storNodeCount expected number of registered storage nodes
 * @param timeout       maximum time to wait for all nodes to register
 * @throws IllegalStateException if the expected node counts are not reached within the timeout
 * @throws InterruptedException  if the waiting thread is interrupted while sleeping on the monitor
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, Duration timeout) throws InterruptedException {
    Instant endTime = Instant.now().plus(timeout);
    synchronized (monitor) {
        while (true) {
            // Count nodes whose slobrok RPC address is still current.
            int distCount = 0, storCount = 0;
            for (NodeInfo info : cluster.getNodeInfos()) {
                if (!info.isRpcAddressOutdated()) {
                    if (info.isDistributor()) ++distCount;
                    else ++storCount;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) return;
            // Compare against the deadline directly, consistent with the other timeout
            // checks in this class, instead of computing a remaining Duration and
            // testing isNegative().
            if (Instant.now().isAfter(endTime)) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and "
                        + storNodeCount + " storage nodes registered in slobrok within timeout of "
                        + timeout + ". (Got " + distCount + " distributors and "
                        + storCount + " storage nodes)");
            }
            monitor.wait(10); // wait briefly; woken early if the monitor is notified
        }
    }
}
if (remainingTime.isNegative()) {
/**
 * Blocks until the given numbers of distributor and storage nodes have a current
 * (non-outdated) RPC address registered in slobrok, or until the timeout expires.
 * Polls under the shared monitor, sleeping 10 ms between checks.
 *
 * @param distNodeCount expected number of registered distributor nodes
 * @param storNodeCount expected number of registered storage nodes
 * @param timeout       maximum time to wait for all nodes to register
 * @throws IllegalStateException if the expected node counts are not reached within the timeout
 * @throws InterruptedException  if the waiting thread is interrupted while sleeping on the monitor
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, Duration timeout) throws InterruptedException {
    Instant endTime = Instant.now().plus(timeout);
    synchronized (monitor) {
        while (true) {
            // Count nodes whose slobrok RPC address is still current.
            int distCount = 0, storCount = 0;
            for (NodeInfo info : cluster.getNodeInfos()) {
                if (!info.isRpcAddressOutdated()) {
                    if (info.isDistributor()) ++distCount;
                    else ++storCount;
                }
            }
            if (distCount == distNodeCount && storCount == storNodeCount) return;
            // Deadline check: compare the current instant against the precomputed end time.
            if (Instant.now().isAfter(endTime)) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                        + " storage nodes registered in slobrok within timeout of " + timeout + ". (Got " + distCount
                        + " distributors and " + storCount + " storage nodes)");
            }
            monitor.wait(10); // wait briefly; woken early if the monitor is notified
        }
    }
}
class FleetController implements NodeListener, SlobrokListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static final Logger logger = Logger.getLogger(FleetController.class.getName()); private final FleetControllerContext context; private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private final AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long lastMetricUpdateCycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<ClusterStateBundle> newStates = new ArrayList<>(); private final List<ClusterStateBundle> convergedStates = new ArrayList<>(); private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean inMasterMoratorium = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; 
private long tickStartTime = Long.MAX_VALUE; private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>(); private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private Set<String> configuredBucketSpaces = Collections.emptySet(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public FleetControllerOptions getOptions() { return options; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(FleetControllerContext context, Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) { context.log(logger, Level.INFO, "Created"); this.context = context; this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler()); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( 
"^/$", new LegacyIndexPageRequestHandler(timer, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController create(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { var context = new FleetControllerContextImpl(options); var timer = new RealTimer(); var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName); var log = new EventLog(timer, metricUpdater); var cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution); var stateGatherer = new NodeStateGatherer(timer, timer, log); var communicator = new RPCCommunicator( RPCCommunicator.createRealSupervisor(), timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress, timer); var lookUp = new SlobrokClient(context, timer); var stateGenerator = new StateChangeHandler(context, timer, log); var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer); var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer); var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return isMaster; 
} } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public ClusterStateBundle getClusterStateBundle() { synchronized (monitor) { return systemStateBroadcaster.getClusterStateBundle(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { context.log(logger, Level.FINE, "Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. */ public void addSystemStateListener(SystemStateListener listener) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) { throw new NullPointerException("Cluster state should never be null at this point"); } listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state))); ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged(); if (convergedState != null) { listener.handleStateConvergedInCluster(convergedState); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { context.log(logger, Level.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } 
runner.join(); } context.log(logger, Level.INFO, "FleetController done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(databaseContext); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options) { var newId = FleetControllerId.fromOptions(options); synchronized(monitor) { assert newId.equals(context.id()); context.log(logger, Level.INFO, "FleetController has new options"); nextOptions = options.clone(); monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleRemovedNode(Node node) { verifyInControllerThread(); wantedStateChanged = true; } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo); stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo); } private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) { if (!options.clusterFeedBlockEnabled) { 
return; } var calc = createResourceExhaustionCalculator(); var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo); var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo); if (!previouslyExhausted.equals(nowExhausted)) { context.log(logger, Level.FINE, () -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s", previouslyExhausted, nowExhausted)); stateChangeHandler.setStateChangedFlag(); } } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } @Override public void handleNewPublishedState(ClusterStateBundle stateBundle) { verifyInControllerThread(); ClusterState baselineState = stateBundle.getBaselineClusterState(); newStates.add(stateBundle); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; systemStateBroadcaster.handleNewClusterStates(stateBundle); if (isMaster) { storeClusterStateMetaDataToZooKeeper(stateBundle); } } private boolean maybePublishOldMetrics() { verifyInControllerThread(); if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) { ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle(); ClusterState baselineState = stateBundle.getBaselineClusterState(); metricUpdater.updateClusterStateMetrics(cluster, baselineState, 
ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; return true; } else { return false; } } private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) { try { database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion()); database.saveLatestClusterStateBundle(databaseContext, stateBundle); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indices of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); context.log(logger, Level.FINEST, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. 
*/ public void lostDatabaseConnection() { verifyInControllerThread(); boolean wasMaster = isMaster; masterElectionHandler.lostDatabaseConnection(); if (wasMaster) { dropLeadershipState(); metricUpdater.updateMasterState(false); } } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); var currentBundle = stateVersionTracker.getVersionedClusterStateBundle(); context.log(logger, Level.FINE, () -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, dbContext); convergedStates.add(currentBundle); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) { return true; } } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() { verifyInControllerThread(); selfTerminateIfConfiguredNodeIndexHasChanged(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } configuredBucketSpaces = Set.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()); stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio); communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) { ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); } eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes, databaseContext.getNodeStateUpdateListener()); database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty()); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ 
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { context.log(logger, Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort + ". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); } private void selfTerminateIfConfiguredNodeIndexHasChanged() { var newId = new FleetControllerId(options.clusterName, options.fleetControllerIndex); if (!newId.equals(context.id())) { context.log(logger, Level.WARNING, context.id() + " got new configuration for " + newId + ". 
We do not support doing this live; " + "immediately exiting now to force new configuration"); prepareShutdownEdge(); System.exit(1); } } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; final String hiddenMessage; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); hiddenMessage = ""; } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTraceAsString(e); context.log(logger, Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = metricUpdater.forWork("doNextZooKeeperTask", () -> database.doNextZooKeeperTask(databaseContext)); didWork |= metricUpdater.forWork("updateMasterElectionState", this::updateMasterElectionState); didWork |= 
metricUpdater.forWork("handleLeadershipEdgeTransitions", this::handleLeadershipEdgeTransitions); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("stateGatherer-processResponses", () -> stateGatherer.processResponses(this)); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("systemStateBroadcaster-processResponses", systemStateBroadcaster::processResponses); if ( ! isRunning()) { return; } if (isMaster) { didWork |= metricUpdater.forWork("broadcastClusterStateToEligibleNodes", this::broadcastClusterStateToEligibleNodes); systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("processAnyPendingStatusPageRequest", this::processAnyPendingStatusPageRequest); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= metricUpdater.forWork("handleRpcRequests", () -> rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this)); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask); didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks); didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } monitor.wait(didWork || waitingForCycle ? 1 : options.cycleWaitTime); if ( ! 
isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw new RuntimeException(e); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to watch master election: " + e); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { context.log(logger, Level.SEVERE, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() { if (database.hasPendingClusterStateMetaDataStore()) { context.log(logger, Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores"); return false; } boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (inMasterMoratorium) { context.log(logger, Level.INFO, currentTime < firstAllowedStateBroadcast ? 
"Master moratorium complete: all nodes have reported in" : "Master moratorium complete: timed out waiting for all nodes to report in"); firstAllowedStateBroadcast = currentTime; inMasterMoratorium = false; } sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired( databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf()); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator); return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : newStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleNewPublishedState(stateBundle); } } newStates.clear(); } } if ( ! convergedStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : convergedStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleStateConvergedInCluster(stateBundle); } } convergedStates.clear(); } } } private boolean processNextQueuedRemoteTask() { metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size()); RemoteClusterControllerTask task = remoteTasks.poll(); if (task == null) { return false; } final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext(); context.log(logger, Level.FINEST, () -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(taskContext); if (taskMayBeCompletedImmediately(task)) { context.log(logger, Level.FINEST, () -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { context.log(logger, Level.FINEST, () -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); 
tasksPendingStateRecompute.add(task); } return true; } private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) { return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster); } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentConsolidatedState = consolidatedClusterState(); context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle(); context.masterInfo = new MasterInterface() { @Override public boolean isMaster() { return isMaster; } @Override public Integer getMaster() { return masterElectionHandler.getMaster(); } @Override public boolean inMasterMoratorium() { return inMasterMoratorium; } }; context.nodeListener = this; context.slobrokListener = this; return context; } private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) { return bundle.deferredActivation() ? nodeInfo.getClusterStateVersionActivationAcked() : nodeInfo.getClusterStateVersionBundleAcknowledged(); } private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) { var bundle = systemStateBroadcaster.getClusterStateBundle(); if (bundle == null) { return List.of(); } return cluster.getNodeInfos().stream(). filter(n -> effectiveActivatedStateVersion(n, bundle) < version). map(NodeInfo::getNode). collect(Collectors.toList()); } private static <E> String stringifyListWithLimits(List<E> list, int limit) { if (list.size() > limit) { var sub = list.subList(0, limit); return String.format("%s (... 
and %d more)", sub.stream().map(E::toString).collect(Collectors.joining(", ")), list.size() - limit); } else { return list.stream().map(E::toString).collect(Collectors.joining(", ")); } } private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) { var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion); if (nodes.isEmpty()) { return ""; } return String.format("the following nodes have not converged to at least version %d: %s", taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages)); } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); final long now = timer.getCurrentTimeInMillis(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { context.log(logger, Level.FINE, () -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else if (taskCompletion.getDeadlineTimePointMs() <= now) { var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion()); context.log(logger, Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)", taskCompletion.getTask().getClass().getName(), details)); taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state 
is guaranteed to have up-to-date information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = metricUpdater.forWork("loadWantedStates", () -> database.loadWantedStates(databaseContext)); didWork |= metricUpdater.forWork("loadStartTimestamps", () -> database.loadStartTimestamps(cluster)); } didWork |= metricUpdater.forWork("updateCluster", () -> nodeLookup.updateCluster(cluster, this)); didWork |= metricUpdater.forWork("sendMessages", () -> stateGatherer.sendMessages(cluster, communicator, this)); didWork |= metricUpdater.forWork( "watchTimers", () -> stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this)); didWork |= metricUpdater.forWork("recomputeClusterStateIfRequired", this::recomputeClusterStateIfRequired); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); stateChangeHandler.setStateChangedFlag(); } } isStateGatherer = true; return didWork; } private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) { systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle)); } private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) { return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported(); } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate) .bucketSpaces(configuredBucketSpaces) .stateDeriver(createBucketSpaceStateDeriver()) .deferredActivation(options.enableTwoPhaseClusterStateActivation) .feedBlock(createResourceExhaustionCalculator() .inferContentClusterFeedBlockOrNull(cluster.getNodeInfos())) .deriveAndBuild(); stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle); invokeCandidateStateListeners(candidateBundle); final long timeNowMs = timer.getCurrentTimeInMillis(); if (hasPassedFirstStateBroadcastTimePoint(timeNowMs) && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) { final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs); 
handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } private ClusterStateDeriver createBucketSpaceStateDeriver() { if (options.clusterHasGlobalDocumentTypes) { return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(), createDefaultSpaceMaintenanceTransitionConstraint()); } else { return createIdentityClonedBucketSpaceStateDeriver(); } } private ResourceExhaustionCalculator createResourceExhaustionCalculator() { return new ResourceExhaustionCalculator( options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit, stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(), options.clusterFeedBlockNoiseLevel); } private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() { return (state, space) -> state.clone(); } private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() { AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle() .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState()); return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState()); } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a 
dependency on the provided version argument. Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. */ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis(); for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { context.log(logger, Level.INFO, task + " will be completed at version " + completeAtVersion); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState, final ClusterStateBundle toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs) .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs())); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean atFirstClusterStateSendTimeEdge() { if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) { return false; } return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis()); } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged() || atFirstClusterStateSendTimeEdge(); } private boolean handleLeadershipEdgeTransitions() { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { stateChangeHandler.setStateChangedFlag(); systemStateBroadcaster.resetBroadcastedClusterStateBundle(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); ClusterStateBundle previousBundle = database.getLatestClusterStateBundle(); database.loadStartTimestamps(cluster); database.loadWantedStates(databaseContext); context.log(logger, Level.INFO, () -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle)); stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; isMaster = true; inMasterMoratorium = true; context.log(logger, Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); didWork = true; } if (wantedStateChanged) { didWork |= database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { dropLeadershipState(); } metricUpdater.updateMasterState(isMaster); return didWork; } private void dropLeadershipState() { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; inMasterMoratorium = false; } @Override public void run() { controllerThreadId = Thread.currentThread().getId(); context.log(logger, Level.INFO, "Starting tick loop"); try { processingCycle = true; while (isRunning()) { tick(); } context.log(logger, Level.INFO, "Tick loop stopped"); } catch (InterruptedException e) { context.log(logger, Level.INFO, "Event thread stopped by interrupt exception: ", e); } catch (Throwable t) { t.printStackTrace(); context.log(logger, Level.SEVERE, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { prepareShutdownEdge(); } } private void prepareShutdownEdge() { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } public DatabaseHandler.DatabaseContext databaseContext = new DatabaseHandler.DatabaseContext() { @Override public ContentCluster getCluster() { return 
cluster; }
        @Override
        public FleetController getFleetController() { return FleetController.this; }
        @Override
        public SlobrokListener getNodeAddedOrRemovedListener() { return FleetController.this; }
        @Override
        public NodeListener getNodeStateUpdateListener() { return FleetController.this; }
    };

    /**
     * Test helper: blocks until at least one full tick cycle has completed after this call.
     * If a cycle is currently being processed, waits for two cycle-count increments so that a
     * complete cycle (start to finish) is guaranteed to have run.
     *
     * @throws IllegalStateException if the timeout expires or the controller stops running
     */
    public void waitForCompleteCycle(Duration timeout) {
        Instant endTime = Instant.now().plus(timeout);
        synchronized (monitor) {
            // +2 when mid-cycle: the in-flight cycle does not count as a "complete" one.
            long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
            waitingForCycle = true; // makes tick() use a short (1 ms) wait so we converge quickly
            try{
                while (cycleCount < wantedCycle) {
                    if (Instant.now().isAfter(endTime))
                        throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeout);
                    if ( !isRunning() )
                        throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                    // Poll with a short wait; interrupts are deliberately ignored here since this
                    // is a test-only helper bounded by the timeout above.
                    try{ monitor.wait(100); } catch (InterruptedException e) {}
                }
            } finally {
                waitingForCycle = false;
            }
        }
    }

    /**
     * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
     * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
     * live performance to remove a non-problem.
     */
    public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, Duration timeout) throws InterruptedException {
        Instant endTime = Instant.now().plus(timeout);
        synchronized (monitor) {
            while (true) {
                // Count nodes that have acknowledged a state bundle at or above the wanted version.
                int ackedNodes = 0;
                for (NodeInfo node : cluster.getNodeInfos()) {
                    if (node.getClusterStateVersionBundleAcknowledged() >= version) {
                        ++ackedNodes;
                    }
                }
                if (ackedNodes >= nodeCount) {
                    context.log(logger, Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                    return;
                }
                Duration remainingTime = Duration.between(Instant.now(), endTime);
                if (remainingTime.isNegative()) {
                    throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout);
                }
                monitor.wait(10);
            }
        }
    }

    // True while the ZooKeeper-backed database connection is open.
    public boolean hasZookeeperConnection() { return !database.isClosed(); }

    // Test-support accessors below.
    public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }
    public ContentCluster getCluster() { return cluster; }
    public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }
    public EventLog getEventLog() { return eventLog; }
}
class FleetController implements NodeListener, SlobrokListener, SystemStateListener,
                                 Runnable, RemoteClusterControllerTaskScheduler {

    private static final Logger logger = Logger.getLogger(FleetController.class.getName());

    private final FleetControllerContext context;
    private final Timer timer;
    // NOTE: the timer instance doubles as the monitor object guarding most mutable state below
    // (see the constructor, where monitor is assigned from timer).
    private final Object monitor;
    private final EventLog eventLog;
    private final NodeLookup nodeLookup;
    private final ContentCluster cluster;
    private final Communicator communicator;
    private final NodeStateGatherer stateGatherer;
    private final StateChangeHandler stateChangeHandler;
    private final SystemStateBroadcaster systemStateBroadcaster;
    private final StateVersionTracker stateVersionTracker;
    private final StatusPageServerInterface statusPageServer;
    private final RpcServer rpcServer;
    private final DatabaseHandler database;
    private final MasterElectionHandler masterElectionHandler;
    private Thread runner = null;                       // tick-loop thread, created by start()
    private final AtomicBoolean running = new AtomicBoolean(true);
    private FleetControllerOptions options;             // active config
    private FleetControllerOptions nextOptions;         // staged config, applied at next tick
    private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>();
    private boolean processingCycle = false;            // true while a tick cycle is executing
    private boolean wantedStateChanged = false;
    private long cycleCount = 0;                        // completed tick cycles
    private long lastMetricUpdateCycleCount = 0;
    private long nextStateSendTime = 0;                 // earliest time a new state may be broadcast
    private Long controllerThreadId = null;             // used to assert controller-thread-only calls
    private boolean waitingForCycle = false;            // set by waitForCompleteCycle() (tests)
    private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
    // States produced but not yet propagated to listeners / converged in the cluster.
    private final List<ClusterStateBundle> newStates = new ArrayList<>();
    private final List<ClusterStateBundle> convergedStates = new ArrayList<>();
    private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
    private final MetricUpdater metricUpdater;

    private boolean isMaster = false;
    // True from mastership acquisition until the first state broadcast is allowed.
    private boolean inMasterMoratorium = false;
    private boolean isStateGatherer = false;
    private long firstAllowedStateBroadcast = Long.MAX_VALUE;
private long tickStartTime = Long.MAX_VALUE; private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>(); private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private Set<String> configuredBucketSpaces = Collections.emptySet(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public FleetControllerOptions getOptions() { return options; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(FleetControllerContext context, Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) { context.log(logger, Level.INFO, "Created"); this.context = context; this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler()); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( 
"^/$", new LegacyIndexPageRequestHandler(timer, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController create(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { var context = new FleetControllerContextImpl(options); var timer = new RealTimer(); var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName); var log = new EventLog(timer, metricUpdater); var cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution); var stateGatherer = new NodeStateGatherer(timer, timer, log); var communicator = new RPCCommunicator( RPCCommunicator.createRealSupervisor(), timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress, timer); var lookUp = new SlobrokClient(context, timer); var stateGenerator = new StateChangeHandler(context, timer, log); var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer); var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer); var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return isMaster; 
} }

    /** Returns the last cluster state handed to the broadcaster (thread-safe). */
    public ClusterState getClusterState() {
        synchronized (monitor) { return systemStateBroadcaster.getClusterState(); }
    }

    public ClusterStateBundle getClusterStateBundle() {
        synchronized (monitor) { return systemStateBroadcaster.getClusterStateBundle(); }
    }

    /** Queues a remote task; it is picked up by the tick loop in processNextQueuedRemoteTask. */
    public void schedule(RemoteClusterControllerTask task) {
        synchronized (monitor) {
            context.log(logger, Level.FINE, "Scheduled remote task " + task.getClass().getName() + " for execution");
            remoteTasks.add(task);
        }
    }

    /** Used for unit testing. */
    public void addSystemStateListener(SystemStateListener listener) {
        systemStateListeners.add(listener);
        // Immediately replay the current published (and, if available, converged) state so the
        // listener does not miss states published before registration.
        com.yahoo.vdslib.state.ClusterState state = getSystemState();
        if (state == null) { throw new NullPointerException("Cluster state should never be null at this point"); }
        listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
        ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
        if (convergedState != null) {
            listener.handleStateConvergedInCluster(convergedState);
        }
    }

    /** Returns a defensive copy of the active options. */
    public FleetControllerOptions getOptions() {
        synchronized(monitor) { return options.clone(); }
    }

    public NodeState getReportedNodeState(Node n) {
        synchronized(monitor) {
            NodeInfo node = cluster.getNodeInfo(n);
            if (node == null) {
                throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
            }
            return node.getReportedState();
        }
    }

    public NodeState getWantedNodeState(Node n) {
        synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); }
    }

    public com.yahoo.vdslib.state.ClusterState getSystemState() {
        synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); }
    }

    public int getRpcPort() { return rpcServer.getPort(); }

    /**
     * Stops the tick loop and shuts down all owned subsystems (database, status page,
     * RPC server, communicator, node lookup).
     */
    public void shutdown() throws InterruptedException, java.io.IOException {
        if (runner != null && isRunning()) {
            context.log(logger, Level.INFO, "Joining event thread.");
            running.set(false);
            // Wake the tick loop so it observes running == false and exits.
            synchronized(monitor) { monitor.notifyAll(); }
runner.join(); } context.log(logger, Level.INFO, "FleetController done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(databaseContext); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options) { var newId = FleetControllerId.fromOptions(options); synchronized(monitor) { assert newId.equals(context.id()); context.log(logger, Level.INFO, "FleetController has new options"); nextOptions = options.clone(); monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleRemovedNode(Node node) { verifyInControllerThread(); wantedStateChanged = true; } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo); stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo); } private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) { if (!options.clusterFeedBlockEnabled) { 
return; } var calc = createResourceExhaustionCalculator(); var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo); var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo); if (!previouslyExhausted.equals(nowExhausted)) { context.log(logger, Level.FINE, () -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s", previouslyExhausted, nowExhausted)); stateChangeHandler.setStateChangedFlag(); } } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } @Override public void handleNewPublishedState(ClusterStateBundle stateBundle) { verifyInControllerThread(); ClusterState baselineState = stateBundle.getBaselineClusterState(); newStates.add(stateBundle); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; systemStateBroadcaster.handleNewClusterStates(stateBundle); if (isMaster) { storeClusterStateMetaDataToZooKeeper(stateBundle); } } private boolean maybePublishOldMetrics() { verifyInControllerThread(); if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) { ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle(); ClusterState baselineState = stateBundle.getBaselineClusterState(); metricUpdater.updateClusterStateMetrics(cluster, baselineState, 
ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; return true; } else { return false; } } private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) { try { database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion()); database.saveLatestClusterStateBundle(databaseContext, stateBundle); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indices of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); context.log(logger, Level.FINEST, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. 
*/
    public void lostDatabaseConnection() {
        verifyInControllerThread();
        boolean wasMaster = isMaster;
        masterElectionHandler.lostDatabaseConnection();
        if (wasMaster) {
            // Losing the ZooKeeper connection means mastership can no longer be asserted;
            // step down immediately and reflect that in the metrics.
            dropLeadershipState();
            metricUpdater.updateMasterState(false);
        }
    }

    // Fails every task waiting on state recomputation or version convergence with
    // LEADERSHIP_LOST, then clears both queues. Called on shutdown and leadership loss.
    private void failAllVersionDependentTasks() {
        tasksPendingStateRecompute.forEach(task -> {
            task.handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
            task.notifyCompleted();
        });
        tasksPendingStateRecompute.clear();
        taskCompletionQueue.forEach(task -> {
            task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
            task.getTask().notifyCompleted();
        });
        taskCompletionQueue.clear();
    }

    /** Called when all distributors have acked newest cluster state version. */
    public void handleAllDistributorsInSync(DatabaseHandler database,
                                            DatabaseHandler.DatabaseContext dbContext) throws InterruptedException {
        Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
        var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
        context.log(logger, Level.FINE, () -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
        stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, dbContext);
        // Record the bundle so listeners get a handleStateConvergedInCluster callback.
        convergedStates.add(currentBundle);
    }

    // True if the new node set differs from the configured one in membership or retired flags.
    private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
        if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
        if (!
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) { return true; } } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() { verifyInControllerThread(); selfTerminateIfConfiguredNodeIndexHasChanged(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } configuredBucketSpaces = Set.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()); stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio); communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) { ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); } eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes, databaseContext.getNodeStateUpdateListener()); database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty()); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ 
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { context.log(logger, Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort + ". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); } private void selfTerminateIfConfiguredNodeIndexHasChanged() { var newId = new FleetControllerId(options.clusterName, options.fleetControllerIndex); if (!newId.equals(context.id())) { context.log(logger, Level.WARNING, context.id() + " got new configuration for " + newId + ". 
We do not support doing this live; " + "immediately exiting now to force new configuration"); prepareShutdownEdge(); System.exit(1); } } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; final String hiddenMessage; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); hiddenMessage = ""; } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTraceAsString(e); context.log(logger, Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = metricUpdater.forWork("doNextZooKeeperTask", () -> database.doNextZooKeeperTask(databaseContext)); didWork |= metricUpdater.forWork("updateMasterElectionState", this::updateMasterElectionState); didWork |= 
metricUpdater.forWork("handleLeadershipEdgeTransitions", this::handleLeadershipEdgeTransitions); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("stateGatherer-processResponses", () -> stateGatherer.processResponses(this)); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("systemStateBroadcaster-processResponses", systemStateBroadcaster::processResponses); if ( ! isRunning()) { return; } if (isMaster) { didWork |= metricUpdater.forWork("broadcastClusterStateToEligibleNodes", this::broadcastClusterStateToEligibleNodes); systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("processAnyPendingStatusPageRequest", this::processAnyPendingStatusPageRequest); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= metricUpdater.forWork("handleRpcRequests", () -> rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this)); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask); didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks); didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } monitor.wait(didWork || waitingForCycle ? 1 : options.cycleWaitTime); if ( ! 
isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw new RuntimeException(e); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to watch master election: " + e); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { context.log(logger, Level.SEVERE, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() { if (database.hasPendingClusterStateMetaDataStore()) { context.log(logger, Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores"); return false; } boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (inMasterMoratorium) { context.log(logger, Level.INFO, currentTime < firstAllowedStateBroadcast ? 
"Master moratorium complete: all nodes have reported in" : "Master moratorium complete: timed out waiting for all nodes to report in"); firstAllowedStateBroadcast = currentTime; inMasterMoratorium = false; } sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired( databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf()); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator); return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : newStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleNewPublishedState(stateBundle); } } newStates.clear(); } } if ( ! convergedStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : convergedStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleStateConvergedInCluster(stateBundle); } } convergedStates.clear(); } } } private boolean processNextQueuedRemoteTask() { metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size()); RemoteClusterControllerTask task = remoteTasks.poll(); if (task == null) { return false; } final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext(); context.log(logger, Level.FINEST, () -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(taskContext); if (taskMayBeCompletedImmediately(task)) { context.log(logger, Level.FINEST, () -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { context.log(logger, Level.FINEST, () -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); 
tasksPendingStateRecompute.add(task); } return true; } private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) { return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster); } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentConsolidatedState = consolidatedClusterState(); context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle(); context.masterInfo = new MasterInterface() { @Override public boolean isMaster() { return isMaster; } @Override public Integer getMaster() { return masterElectionHandler.getMaster(); } @Override public boolean inMasterMoratorium() { return inMasterMoratorium; } }; context.nodeListener = this; context.slobrokListener = this; return context; } private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) { return bundle.deferredActivation() ? nodeInfo.getClusterStateVersionActivationAcked() : nodeInfo.getClusterStateVersionBundleAcknowledged(); } private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) { var bundle = systemStateBroadcaster.getClusterStateBundle(); if (bundle == null) { return List.of(); } return cluster.getNodeInfos().stream(). filter(n -> effectiveActivatedStateVersion(n, bundle) < version). map(NodeInfo::getNode). collect(Collectors.toList()); } private static <E> String stringifyListWithLimits(List<E> list, int limit) { if (list.size() > limit) { var sub = list.subList(0, limit); return String.format("%s (... 
and %d more)", sub.stream().map(E::toString).collect(Collectors.joining(", ")), list.size() - limit); } else { return list.stream().map(E::toString).collect(Collectors.joining(", ")); } } private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) { var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion); if (nodes.isEmpty()) { return ""; } return String.format("the following nodes have not converged to at least version %d: %s", taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages)); } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); final long now = timer.getCurrentTimeInMillis(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { context.log(logger, Level.FINE, () -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else if (taskCompletion.getDeadlineTimePointMs() <= now) { var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion()); context.log(logger, Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)", taskCompletion.getTask().getClass().getName(), details)); taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state 
is guaranteed to have up-to-date information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = metricUpdater.forWork("loadWantedStates", () -> database.loadWantedStates(databaseContext)); didWork |= metricUpdater.forWork("loadStartTimestamps", () -> database.loadStartTimestamps(cluster)); } didWork |= metricUpdater.forWork("updateCluster", () -> nodeLookup.updateCluster(cluster, this)); didWork |= metricUpdater.forWork("sendMessages", () -> stateGatherer.sendMessages(cluster, communicator, this)); didWork |= metricUpdater.forWork( "watchTimers", () -> stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this)); didWork |= metricUpdater.forWork("recomputeClusterStateIfRequired", this::recomputeClusterStateIfRequired); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); stateChangeHandler.setStateChangedFlag(); } } isStateGatherer = true; return didWork; } private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) { systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle)); } private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) { return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported(); } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate) .bucketSpaces(configuredBucketSpaces) .stateDeriver(createBucketSpaceStateDeriver()) .deferredActivation(options.enableTwoPhaseClusterStateActivation) .feedBlock(createResourceExhaustionCalculator() .inferContentClusterFeedBlockOrNull(cluster.getNodeInfos())) .deriveAndBuild(); stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle); invokeCandidateStateListeners(candidateBundle); final long timeNowMs = timer.getCurrentTimeInMillis(); if (hasPassedFirstStateBroadcastTimePoint(timeNowMs) && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) { final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs); 
handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } private ClusterStateDeriver createBucketSpaceStateDeriver() { if (options.clusterHasGlobalDocumentTypes) { return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(), createDefaultSpaceMaintenanceTransitionConstraint()); } else { return createIdentityClonedBucketSpaceStateDeriver(); } } private ResourceExhaustionCalculator createResourceExhaustionCalculator() { return new ResourceExhaustionCalculator( options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit, stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(), options.clusterFeedBlockNoiseLevel); } private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() { return (state, space) -> state.clone(); } private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() { AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle() .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState()); return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState()); } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a 
dependency on the provided version argument. Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. */ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis(); for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { context.log(logger, Level.INFO, task + " will be completed at version " + completeAtVersion); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState, final ClusterStateBundle toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs) .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs())); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean atFirstClusterStateSendTimeEdge() { if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) { return false; } return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis()); } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged() || atFirstClusterStateSendTimeEdge(); } private boolean handleLeadershipEdgeTransitions() { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { stateChangeHandler.setStateChangedFlag(); systemStateBroadcaster.resetBroadcastedClusterStateBundle(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); ClusterStateBundle previousBundle = database.getLatestClusterStateBundle(); database.loadStartTimestamps(cluster); database.loadWantedStates(databaseContext); context.log(logger, Level.INFO, () -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle)); stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; isMaster = true; inMasterMoratorium = true; context.log(logger, Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); didWork = true; } if (wantedStateChanged) { didWork |= database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { dropLeadershipState(); } metricUpdater.updateMasterState(isMaster); return didWork; } private void dropLeadershipState() { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; inMasterMoratorium = false; } @Override public void run() { controllerThreadId = Thread.currentThread().getId(); context.log(logger, Level.INFO, "Starting tick loop"); try { processingCycle = true; while (isRunning()) { tick(); } context.log(logger, Level.INFO, "Tick loop stopped"); } catch (InterruptedException e) { context.log(logger, Level.INFO, "Event thread stopped by interrupt exception: ", e); } catch (Throwable t) { t.printStackTrace(); context.log(logger, Level.SEVERE, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { prepareShutdownEdge(); } } private void prepareShutdownEdge() { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } public DatabaseHandler.DatabaseContext databaseContext = new DatabaseHandler.DatabaseContext() { @Override public ContentCluster getCluster() { return 
cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public SlobrokListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeListener getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(Duration timeout) { Instant endTime = Instant.now().plus(timeout); synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (Instant.now().isAfter(endTime)) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeout); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. 
*/ public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, Duration timeout) throws InterruptedException { Instant endTime = Instant.now().plus(timeout); synchronized (monitor) { while (true) { int ackedNodes = 0; for (NodeInfo node : cluster.getNodeInfos()) { if (node.getClusterStateVersionBundleAcknowledged() >= version) { ++ackedNodes; } } if (ackedNodes >= nodeCount) { context.log(logger, Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher."); return; } if (Instant.now().isAfter(endTime)) { throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout); } monitor.wait(10); } } } public boolean hasZookeeperConnection() { return !database.isClosed(); } public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); } public ContentCluster getCluster() { return cluster; } public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); } public EventLog getEventLog() { return eventLog; } }
getSeconds will round down. 3.5 => 3, 0.9 => 0. That might be fine here in the test, but generally it is not. Can Duration be converted to a double in seconds ?
private void setWantedState(Node n, NodeState ns, Map<Node, NodeState> wantedStates) { int rpcPort = fleetController.getRpcPort(); if (supervisor == null) { supervisor = new Supervisor(new Transport()); } Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); Request req = new Request("setNodeState"); req.parameters().add(new StringValue("storage/cluster.mycluster/" + n.getType().toString() + "/" + n.getIndex())); req.parameters().add(new StringValue(ns.serialize(true))); connection.invokeSync(req, timeout.getSeconds()); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("s"), req.toString()); wantedStates.put(n, ns); }
connection.invokeSync(req, timeout.getSeconds());
private void setWantedState(Node n, NodeState ns, Map<Node, NodeState> wantedStates) { int rpcPort = fleetController.getRpcPort(); if (supervisor == null) { supervisor = new Supervisor(new Transport()); } Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); Request req = new Request("setNodeState"); req.parameters().add(new StringValue("storage/cluster.mycluster/" + n.getType().toString() + "/" + n.getIndex())); req.parameters().add(new StringValue(ns.serialize(true))); connection.invokeSync(req, timeoutInSeconds()); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("s"), req.toString()); wantedStates.put(n, ns); }
class DatabaseTest extends FleetControllerTest { private static final Logger log = Logger.getLogger(DatabaseTest.class.getName()); @Test void testWantedStatesInZooKeeper() throws Exception { startingTest("DatabaseTest::testWantedStatesInZooKeeper"); FleetControllerOptions options = defaultOptions("mycluster"); options.zooKeeperServerAddress = "127.0.0.1"; setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions()); log.info("WAITING FOR STABLE SYSTEM"); waitForStableSystem(); log.info("VALIDATE STARTING WANTED STATES"); Map<Node, NodeState> wantedStates = new TreeMap<>(); for (DummyVdsNode node : nodes) { wantedStates.put(node.getNode(), new NodeState(node.getType(), State.UP)); } assertWantedStates(wantedStates); log.info("SET A WANTED STATE AND SEE THAT IT GETS PROPAGATED"); setWantedState(new Node(NodeType.STORAGE, 3), new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("Yoo"), wantedStates); waitForState("version:\\d+ distributor:10 storage:10 .3.s:m"); assertWantedStates(wantedStates); log.info("SET ANOTHER WANTED STATE AND SEE THAT IT GETS PROPAGATED"); setWantedState(new Node(NodeType.DISTRIBUTOR, 2), new NodeState(NodeType.DISTRIBUTOR, State.DOWN), wantedStates); waitForState("version:\\d+ distributor:10 .2.s:d storage:10 .3.s:m"); assertWantedStates(wantedStates); log.info("SET YET ANOTHER WANTED STATE AND SEE THAT IT GETS PROPAGATED"); setWantedState(new Node(NodeType.STORAGE, 7), new NodeState(NodeType.STORAGE, State.RETIRED).setDescription("We wanna replace this node"), wantedStates); waitForState("version:\\d+ distributor:10 .2.s:d storage:10 .3.s:m .7.s:r"); assertWantedStates(wantedStates); log.info("CHECK THAT WANTED STATES PERSIST FLEETCONTROLLER RESTART"); stopFleetController(); startFleetController(false); waitForState("version:\\d+ distributor:10 .2.s:d storage:10 .3.s:m .7.s:r"); assertWantedStates(wantedStates); log.info("CLEAR WANTED STATE"); setWantedState(new Node(NodeType.STORAGE, 7), new 
NodeState(NodeType.STORAGE, State.UP), wantedStates); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.DISTRIBUTOR, 5), new NodeState(NodeType.DISTRIBUTOR, State.DOWN), wantedStates); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.DISTRIBUTOR, 2), new NodeState(NodeType.DISTRIBUTOR, State.UP), wantedStates); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.STORAGE, 9), new NodeState(NodeType.STORAGE, State.DOWN), wantedStates); assertWantedStates(wantedStates); } @Test void testWantedStateOfUnknownNode() throws Exception { startingTest("DatabaseTest::testWantedStatesOfUnknownNode"); FleetControllerOptions options = defaultOptions("mycluster"); options.minRatioOfDistributorNodesUp = 0; options.minRatioOfStorageNodesUp = 0; options.zooKeeperServerAddress = "localhost"; setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions()); waitForStableSystem(); Map<Node, NodeState> wantedStates = new TreeMap<>(); for (DummyVdsNode node : nodes) { wantedStates.put(node.getNode(), new NodeState(node.getType(), State.UP)); } assertWantedStates(wantedStates); setWantedState(new Node(NodeType.STORAGE, 1), new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("Yoo"), wantedStates); waitForState("version:\\d+ distributor:10 storage:10 .1.s:m"); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.DISTRIBUTOR, 8), new NodeState(NodeType.DISTRIBUTOR, State.DOWN), wantedStates); waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m"); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("foobar"), wantedStates); waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .6.s:m"); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.STORAGE, 7), new NodeState(NodeType.STORAGE, State.RETIRED).setDescription("foobar"), wantedStates); waitForState("version:\\d+ 
distributor:10 .8.s:d storage:10 .1.s:m .6.s:m .7.s:r"); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.STORAGE, 8), new NodeState(NodeType.STORAGE, State.DOWN).setDescription("foobar"), wantedStates); waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .6.s:m .7.s:r .8.s:d"); assertWantedStates(wantedStates); stopFleetController(); for (int i = 6; i < nodes.size(); ++i) nodes.get(i).disconnect(); startFleetController(false); waitForState("version:\\d+ distributor:3 storage:7 .1.s:m .3.s:d .4.s:d .5.s:d .6.s:m"); setWantedState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.UP), wantedStates); waitForState("version:\\d+ distributor:3 storage:3 .1.s:m"); for (int i = 6; i < nodes.size(); ++i) nodes.get(i).connect(); waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .7.s:r .8.s:d"); assertWantedStates(wantedStates); } private void assertWantedStates(Map<Node, NodeState> wantedStates) { for (DummyVdsNode node : nodes) { assertEquals(wantedStates.get(node.getNode()), fleetController.getWantedNodeState(node.getNode()), node.getNode().toString()); } } }
class DatabaseTest extends FleetControllerTest { private static final Logger log = Logger.getLogger(DatabaseTest.class.getName()); @Test void testWantedStatesInZooKeeper() throws Exception { startingTest("DatabaseTest::testWantedStatesInZooKeeper"); FleetControllerOptions options = defaultOptions("mycluster"); options.zooKeeperServerAddress = "127.0.0.1"; setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions()); log.info("WAITING FOR STABLE SYSTEM"); waitForStableSystem(); log.info("VALIDATE STARTING WANTED STATES"); Map<Node, NodeState> wantedStates = new TreeMap<>(); for (DummyVdsNode node : nodes) { wantedStates.put(node.getNode(), new NodeState(node.getType(), State.UP)); } assertWantedStates(wantedStates); log.info("SET A WANTED STATE AND SEE THAT IT GETS PROPAGATED"); setWantedState(new Node(NodeType.STORAGE, 3), new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("Yoo"), wantedStates); waitForState("version:\\d+ distributor:10 storage:10 .3.s:m"); assertWantedStates(wantedStates); log.info("SET ANOTHER WANTED STATE AND SEE THAT IT GETS PROPAGATED"); setWantedState(new Node(NodeType.DISTRIBUTOR, 2), new NodeState(NodeType.DISTRIBUTOR, State.DOWN), wantedStates); waitForState("version:\\d+ distributor:10 .2.s:d storage:10 .3.s:m"); assertWantedStates(wantedStates); log.info("SET YET ANOTHER WANTED STATE AND SEE THAT IT GETS PROPAGATED"); setWantedState(new Node(NodeType.STORAGE, 7), new NodeState(NodeType.STORAGE, State.RETIRED).setDescription("We wanna replace this node"), wantedStates); waitForState("version:\\d+ distributor:10 .2.s:d storage:10 .3.s:m .7.s:r"); assertWantedStates(wantedStates); log.info("CHECK THAT WANTED STATES PERSIST FLEETCONTROLLER RESTART"); stopFleetController(); startFleetController(false); waitForState("version:\\d+ distributor:10 .2.s:d storage:10 .3.s:m .7.s:r"); assertWantedStates(wantedStates); log.info("CLEAR WANTED STATE"); setWantedState(new Node(NodeType.STORAGE, 7), new 
NodeState(NodeType.STORAGE, State.UP), wantedStates); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.DISTRIBUTOR, 5), new NodeState(NodeType.DISTRIBUTOR, State.DOWN), wantedStates); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.DISTRIBUTOR, 2), new NodeState(NodeType.DISTRIBUTOR, State.UP), wantedStates); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.STORAGE, 9), new NodeState(NodeType.STORAGE, State.DOWN), wantedStates); assertWantedStates(wantedStates); } @Test void testWantedStateOfUnknownNode() throws Exception { startingTest("DatabaseTest::testWantedStatesOfUnknownNode"); FleetControllerOptions options = defaultOptions("mycluster"); options.minRatioOfDistributorNodesUp = 0; options.minRatioOfStorageNodesUp = 0; options.zooKeeperServerAddress = "localhost"; setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions()); waitForStableSystem(); Map<Node, NodeState> wantedStates = new TreeMap<>(); for (DummyVdsNode node : nodes) { wantedStates.put(node.getNode(), new NodeState(node.getType(), State.UP)); } assertWantedStates(wantedStates); setWantedState(new Node(NodeType.STORAGE, 1), new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("Yoo"), wantedStates); waitForState("version:\\d+ distributor:10 storage:10 .1.s:m"); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.DISTRIBUTOR, 8), new NodeState(NodeType.DISTRIBUTOR, State.DOWN), wantedStates); waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m"); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("foobar"), wantedStates); waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .6.s:m"); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.STORAGE, 7), new NodeState(NodeType.STORAGE, State.RETIRED).setDescription("foobar"), wantedStates); waitForState("version:\\d+ 
distributor:10 .8.s:d storage:10 .1.s:m .6.s:m .7.s:r"); assertWantedStates(wantedStates); setWantedState(new Node(NodeType.STORAGE, 8), new NodeState(NodeType.STORAGE, State.DOWN).setDescription("foobar"), wantedStates); waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .6.s:m .7.s:r .8.s:d"); assertWantedStates(wantedStates); stopFleetController(); for (int i = 6; i < nodes.size(); ++i) nodes.get(i).disconnect(); startFleetController(false); waitForState("version:\\d+ distributor:3 storage:7 .1.s:m .3.s:d .4.s:d .5.s:d .6.s:m"); setWantedState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.UP), wantedStates); waitForState("version:\\d+ distributor:3 storage:3 .1.s:m"); for (int i = 6; i < nodes.size(); ++i) nodes.get(i).connect(); waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .7.s:r .8.s:d"); assertWantedStates(wantedStates); } private void assertWantedStates(Map<Node, NodeState> wantedStates) { for (DummyVdsNode node : nodes) { assertEquals(wantedStates.get(node.getNode()), fleetController.getWantedNodeState(node.getNode()), node.getNode().toString()); } } }
Good point, will fix
private void setWantedState(Node n, NodeState ns, Map<Node, NodeState> wantedStates) { int rpcPort = fleetController.getRpcPort(); if (supervisor == null) { supervisor = new Supervisor(new Transport()); } Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); Request req = new Request("setNodeState"); req.parameters().add(new StringValue("storage/cluster.mycluster/" + n.getType().toString() + "/" + n.getIndex())); req.parameters().add(new StringValue(ns.serialize(true))); connection.invokeSync(req, timeout.getSeconds()); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("s"), req.toString()); wantedStates.put(n, ns); }
connection.invokeSync(req, timeout.getSeconds());
private void setWantedState(Node n, NodeState ns, Map<Node, NodeState> wantedStates) { int rpcPort = fleetController.getRpcPort(); if (supervisor == null) { supervisor = new Supervisor(new Transport()); } Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); Request req = new Request("setNodeState"); req.parameters().add(new StringValue("storage/cluster.mycluster/" + n.getType().toString() + "/" + n.getIndex())); req.parameters().add(new StringValue(ns.serialize(true))); connection.invokeSync(req, timeoutInSeconds()); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("s"), req.toString()); wantedStates.put(n, ns); }
// Integration tests verifying that wanted node states set through the fleet
// controller are persisted (in ZooKeeper, per the configured
// zooKeeperServerAddress) and survive a fleet controller restart.
// Test scaffolding (startingTest, setUpFleetController, setUpVdsNodes,
// waitForState, setWantedState, and the nodes/fleetController fields) is
// inherited from FleetControllerTest.
class DatabaseTest extends FleetControllerTest {

    private static final Logger log = Logger.getLogger(DatabaseTest.class.getName());

    // Sets wanted states on known nodes, verifies they propagate into the
    // published cluster state, and checks they persist across a controller restart.
    @Test
    void testWantedStatesInZooKeeper() throws Exception {
        startingTest("DatabaseTest::testWantedStatesInZooKeeper");
        FleetControllerOptions options = defaultOptions("mycluster");
        options.zooKeeperServerAddress = "127.0.0.1";
        setUpFleetController(true, options);
        setUpVdsNodes(true, new DummyVdsNodeOptions());
        log.info("WAITING FOR STABLE SYSTEM");
        waitForStableSystem();

        log.info("VALIDATE STARTING WANTED STATES");
        // All nodes start out with wanted state UP.
        Map<Node, NodeState> wantedStates = new TreeMap<>();
        for (DummyVdsNode node : nodes) {
            wantedStates.put(node.getNode(), new NodeState(node.getType(), State.UP));
        }
        assertWantedStates(wantedStates);

        log.info("SET A WANTED STATE AND SEE THAT IT GETS PROPAGATED");
        setWantedState(new Node(NodeType.STORAGE, 3), new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("Yoo"), wantedStates);
        waitForState("version:\\d+ distributor:10 storage:10 .3.s:m");
        assertWantedStates(wantedStates);

        log.info("SET ANOTHER WANTED STATE AND SEE THAT IT GETS PROPAGATED");
        setWantedState(new Node(NodeType.DISTRIBUTOR, 2), new NodeState(NodeType.DISTRIBUTOR, State.DOWN), wantedStates);
        waitForState("version:\\d+ distributor:10 .2.s:d storage:10 .3.s:m");
        assertWantedStates(wantedStates);

        log.info("SET YET ANOTHER WANTED STATE AND SEE THAT IT GETS PROPAGATED");
        setWantedState(new Node(NodeType.STORAGE, 7), new NodeState(NodeType.STORAGE, State.RETIRED).setDescription("We wanna replace this node"), wantedStates);
        waitForState("version:\\d+ distributor:10 .2.s:d storage:10 .3.s:m .7.s:r");
        assertWantedStates(wantedStates);

        log.info("CHECK THAT WANTED STATES PERSIST FLEETCONTROLLER RESTART");
        // Restart without clearing stored state; wanted states must be re-read
        // from the database.
        stopFleetController();
        startFleetController(false);
        waitForState("version:\\d+ distributor:10 .2.s:d storage:10 .3.s:m .7.s:r");
        assertWantedStates(wantedStates);

        log.info("CLEAR WANTED STATE");
        setWantedState(new Node(NodeType.STORAGE, 7), new NodeState(NodeType.STORAGE, State.UP), wantedStates);
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.DISTRIBUTOR, 5), new NodeState(NodeType.DISTRIBUTOR, State.DOWN), wantedStates);
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.DISTRIBUTOR, 2), new NodeState(NodeType.DISTRIBUTOR, State.UP), wantedStates);
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.STORAGE, 9), new NodeState(NodeType.STORAGE, State.DOWN), wantedStates);
        assertWantedStates(wantedStates);
    }

    // Verifies that wanted states can be set for, and are retained for, nodes
    // that are not currently registered (disconnected from slobrok), and that
    // such states are still honored when the nodes come back.
    @Test
    void testWantedStateOfUnknownNode() throws Exception {
        startingTest("DatabaseTest::testWantedStatesOfUnknownNode");
        FleetControllerOptions options = defaultOptions("mycluster");
        // Allow the cluster to stay up with most nodes gone, so disconnecting
        // nodes below does not take the whole cluster state down.
        options.minRatioOfDistributorNodesUp = 0;
        options.minRatioOfStorageNodesUp = 0;
        options.zooKeeperServerAddress = "localhost";
        setUpFleetController(true, options);
        setUpVdsNodes(true, new DummyVdsNodeOptions());
        waitForStableSystem();

        // All nodes start out with wanted state UP.
        Map<Node, NodeState> wantedStates = new TreeMap<>();
        for (DummyVdsNode node : nodes) {
            wantedStates.put(node.getNode(), new NodeState(node.getType(), State.UP));
        }
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.STORAGE, 1), new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("Yoo"), wantedStates);
        waitForState("version:\\d+ distributor:10 storage:10 .1.s:m");
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.DISTRIBUTOR, 8), new NodeState(NodeType.DISTRIBUTOR, State.DOWN), wantedStates);
        waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m");
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("foobar"), wantedStates);
        waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .6.s:m");
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.STORAGE, 7), new NodeState(NodeType.STORAGE, State.RETIRED).setDescription("foobar"), wantedStates);
        waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .6.s:m .7.s:r");
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.STORAGE, 8), new NodeState(NodeType.STORAGE, State.DOWN).setDescription("foobar"), wantedStates);
        waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .6.s:m .7.s:r .8.s:d");
        assertWantedStates(wantedStates);

        // Restart the controller with nodes 6..9 disconnected, then clear the
        // wanted state of a node (storage 6) that is currently unknown.
        stopFleetController();
        for (int i = 6; i < nodes.size(); ++i) nodes.get(i).disconnect();
        startFleetController(false);

        waitForState("version:\\d+ distributor:3 storage:7 .1.s:m .3.s:d .4.s:d .5.s:d .6.s:m");

        setWantedState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.UP), wantedStates);
        waitForState("version:\\d+ distributor:3 storage:3 .1.s:m");

        // When the nodes return, the remaining persisted wanted states
        // (retired storage 7, down storage/distributor 8) must still apply.
        for (int i = 6; i < nodes.size(); ++i) nodes.get(i).connect();
        waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .7.s:r .8.s:d");
        assertWantedStates(wantedStates);
    }

    // Asserts that the fleet controller's wanted state for every dummy node
    // matches the expected map.
    private void assertWantedStates(Map<Node, NodeState> wantedStates) {
        for (DummyVdsNode node : nodes) {
            assertEquals(wantedStates.get(node.getNode()), fleetController.getWantedNodeState(node.getNode()), node.getNode().toString());
        }
    }

}
// Integration tests for wanted-node-state persistence: states set through the
// fleet controller must be stored durably (ZooKeeper, per
// zooKeeperServerAddress) and survive controller restarts. Helpers such as
// startingTest, setUpFleetController, setUpVdsNodes, waitForState and
// setWantedState come from the FleetControllerTest base class.
class DatabaseTest extends FleetControllerTest {

    private static final Logger log = Logger.getLogger(DatabaseTest.class.getName());

    // Sets wanted states on known nodes, checks propagation into the published
    // cluster state, and checks persistence across a controller restart.
    @Test
    void testWantedStatesInZooKeeper() throws Exception {
        startingTest("DatabaseTest::testWantedStatesInZooKeeper");
        FleetControllerOptions options = defaultOptions("mycluster");
        options.zooKeeperServerAddress = "127.0.0.1";
        setUpFleetController(true, options);
        setUpVdsNodes(true, new DummyVdsNodeOptions());
        log.info("WAITING FOR STABLE SYSTEM");
        waitForStableSystem();

        log.info("VALIDATE STARTING WANTED STATES");
        // Every node's initial wanted state is UP.
        Map<Node, NodeState> wantedStates = new TreeMap<>();
        for (DummyVdsNode node : nodes) {
            wantedStates.put(node.getNode(), new NodeState(node.getType(), State.UP));
        }
        assertWantedStates(wantedStates);

        log.info("SET A WANTED STATE AND SEE THAT IT GETS PROPAGATED");
        setWantedState(new Node(NodeType.STORAGE, 3), new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("Yoo"), wantedStates);
        waitForState("version:\\d+ distributor:10 storage:10 .3.s:m");
        assertWantedStates(wantedStates);

        log.info("SET ANOTHER WANTED STATE AND SEE THAT IT GETS PROPAGATED");
        setWantedState(new Node(NodeType.DISTRIBUTOR, 2), new NodeState(NodeType.DISTRIBUTOR, State.DOWN), wantedStates);
        waitForState("version:\\d+ distributor:10 .2.s:d storage:10 .3.s:m");
        assertWantedStates(wantedStates);

        log.info("SET YET ANOTHER WANTED STATE AND SEE THAT IT GETS PROPAGATED");
        setWantedState(new Node(NodeType.STORAGE, 7), new NodeState(NodeType.STORAGE, State.RETIRED).setDescription("We wanna replace this node"), wantedStates);
        waitForState("version:\\d+ distributor:10 .2.s:d storage:10 .3.s:m .7.s:r");
        assertWantedStates(wantedStates);

        log.info("CHECK THAT WANTED STATES PERSIST FLEETCONTROLLER RESTART");
        // Restart without wiping stored state; wanted states must be re-read
        // from the database.
        stopFleetController();
        startFleetController(false);
        waitForState("version:\\d+ distributor:10 .2.s:d storage:10 .3.s:m .7.s:r");
        assertWantedStates(wantedStates);

        log.info("CLEAR WANTED STATE");
        setWantedState(new Node(NodeType.STORAGE, 7), new NodeState(NodeType.STORAGE, State.UP), wantedStates);
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.DISTRIBUTOR, 5), new NodeState(NodeType.DISTRIBUTOR, State.DOWN), wantedStates);
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.DISTRIBUTOR, 2), new NodeState(NodeType.DISTRIBUTOR, State.UP), wantedStates);
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.STORAGE, 9), new NodeState(NodeType.STORAGE, State.DOWN), wantedStates);
        assertWantedStates(wantedStates);
    }

    // Verifies that wanted states can be set for nodes that are currently
    // disconnected (unknown to slobrok), and that persisted states are still
    // honored once those nodes reconnect.
    @Test
    void testWantedStateOfUnknownNode() throws Exception {
        startingTest("DatabaseTest::testWantedStatesOfUnknownNode");
        FleetControllerOptions options = defaultOptions("mycluster");
        // Keep the cluster up even when most nodes are gone, so the
        // disconnections below do not collapse the cluster state.
        options.minRatioOfDistributorNodesUp = 0;
        options.minRatioOfStorageNodesUp = 0;
        options.zooKeeperServerAddress = "localhost";
        setUpFleetController(true, options);
        setUpVdsNodes(true, new DummyVdsNodeOptions());
        waitForStableSystem();

        // Every node's initial wanted state is UP.
        Map<Node, NodeState> wantedStates = new TreeMap<>();
        for (DummyVdsNode node : nodes) {
            wantedStates.put(node.getNode(), new NodeState(node.getType(), State.UP));
        }
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.STORAGE, 1), new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("Yoo"), wantedStates);
        waitForState("version:\\d+ distributor:10 storage:10 .1.s:m");
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.DISTRIBUTOR, 8), new NodeState(NodeType.DISTRIBUTOR, State.DOWN), wantedStates);
        waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m");
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("foobar"), wantedStates);
        waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .6.s:m");
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.STORAGE, 7), new NodeState(NodeType.STORAGE, State.RETIRED).setDescription("foobar"), wantedStates);
        waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .6.s:m .7.s:r");
        assertWantedStates(wantedStates);

        setWantedState(new Node(NodeType.STORAGE, 8), new NodeState(NodeType.STORAGE, State.DOWN).setDescription("foobar"), wantedStates);
        waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .6.s:m .7.s:r .8.s:d");
        assertWantedStates(wantedStates);

        // Restart the controller with nodes 6..9 disconnected, then clear the
        // wanted state of storage node 6 while it is unknown.
        stopFleetController();
        for (int i = 6; i < nodes.size(); ++i) nodes.get(i).disconnect();
        startFleetController(false);

        waitForState("version:\\d+ distributor:3 storage:7 .1.s:m .3.s:d .4.s:d .5.s:d .6.s:m");

        setWantedState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.UP), wantedStates);
        waitForState("version:\\d+ distributor:3 storage:3 .1.s:m");

        // After reconnecting, the persisted wanted states (retired storage 7,
        // down storage/distributor 8) must still be in effect.
        for (int i = 6; i < nodes.size(); ++i) nodes.get(i).connect();
        waitForState("version:\\d+ distributor:10 .8.s:d storage:10 .1.s:m .7.s:r .8.s:d");
        assertWantedStates(wantedStates);
    }

    // Asserts the controller's wanted state for each dummy node equals the
    // expected map entry.
    private void assertWantedStates(Map<Node, NodeState> wantedStates) {
        for (DummyVdsNode node : nodes) {
            assertEquals(wantedStates.get(node.getNode()), fleetController.getWantedNodeState(node.getNode()), node.getNode().toString());
        }
    }

}
👍
/**
 * Blocks until exactly the given number of distributor and storage nodes have
 * current (non-outdated) RPC addresses registered in slobrok, polling roughly
 * every 10 ms. Throws IllegalStateException if the counts are not reached
 * within the given timeout.
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, Duration timeout) throws InterruptedException {
    Instant endTime = Instant.now().plus(timeout);
    synchronized (monitor) {
        while (true) {
            // Count nodes whose slobrok RPC address is currently valid.
            int distributorsSeen = 0;
            int storageSeen = 0;
            for (NodeInfo info : cluster.getNodeInfos()) {
                if (info.isRpcAddressOutdated()) continue;
                if (info.isDistributor()) {
                    ++distributorsSeen;
                } else {
                    ++storageSeen;
                }
            }
            if (distributorsSeen == distNodeCount && storageSeen == storNodeCount) {
                return;
            }
            Duration remainingTime = Duration.between(Instant.now(), endTime);
            if (remainingTime.isNegative()) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeout + ". (Got " + distributorsSeen + " distributors and " + storageSeen + " storage nodes)");
            }
            // Wait briefly; state-change notifications may also wake us early.
            monitor.wait(10);
        }
    }
}
if (remainingTime.isNegative()) {
/**
 * Blocks until exactly the given number of distributor and storage nodes have
 * current (non-outdated) RPC addresses registered in slobrok, polling roughly
 * every 10 ms. Throws IllegalStateException once the deadline passes without
 * the counts being reached.
 */
public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, Duration timeout) throws InterruptedException {
    Instant endTime = Instant.now().plus(timeout);
    synchronized (monitor) {
        while (true) {
            // Count nodes whose slobrok RPC address is currently valid.
            int distributorsSeen = 0;
            int storageSeen = 0;
            for (NodeInfo info : cluster.getNodeInfos()) {
                if (info.isRpcAddressOutdated()) continue;
                if (info.isDistributor()) {
                    ++distributorsSeen;
                } else {
                    ++storageSeen;
                }
            }
            if (distributorsSeen == distNodeCount && storageSeen == storNodeCount) {
                return;
            }
            if (Instant.now().isAfter(endTime)) {
                throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeout + ". (Got " + distributorsSeen + " distributors and " + storageSeen + " storage nodes)");
            }
            // Wait briefly; state-change notifications may also wake us early.
            monitor.wait(10);
        }
    }
}
class FleetController implements NodeListener, SlobrokListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static final Logger logger = Logger.getLogger(FleetController.class.getName()); private final FleetControllerContext context; private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private final AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long lastMetricUpdateCycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<ClusterStateBundle> newStates = new ArrayList<>(); private final List<ClusterStateBundle> convergedStates = new ArrayList<>(); private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean inMasterMoratorium = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; 
private long tickStartTime = Long.MAX_VALUE; private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>(); private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private Set<String> configuredBucketSpaces = Collections.emptySet(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public FleetControllerOptions getOptions() { return options; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(FleetControllerContext context, Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) { context.log(logger, Level.INFO, "Created"); this.context = context; this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler()); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( 
"^/$", new LegacyIndexPageRequestHandler(timer, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController create(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { var context = new FleetControllerContextImpl(options); var timer = new RealTimer(); var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName); var log = new EventLog(timer, metricUpdater); var cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution); var stateGatherer = new NodeStateGatherer(timer, timer, log); var communicator = new RPCCommunicator( RPCCommunicator.createRealSupervisor(), timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress, timer); var lookUp = new SlobrokClient(context, timer); var stateGenerator = new StateChangeHandler(context, timer, log); var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer); var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer); var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return isMaster; 
} } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public ClusterStateBundle getClusterStateBundle() { synchronized (monitor) { return systemStateBroadcaster.getClusterStateBundle(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { context.log(logger, Level.FINE, "Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. */ public void addSystemStateListener(SystemStateListener listener) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) { throw new NullPointerException("Cluster state should never be null at this point"); } listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state))); ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged(); if (convergedState != null) { listener.handleStateConvergedInCluster(convergedState); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { context.log(logger, Level.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } 
runner.join(); } context.log(logger, Level.INFO, "FleetController done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(databaseContext); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options) { var newId = FleetControllerId.fromOptions(options); synchronized(monitor) { assert newId.equals(context.id()); context.log(logger, Level.INFO, "FleetController has new options"); nextOptions = options.clone(); monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleRemovedNode(Node node) { verifyInControllerThread(); wantedStateChanged = true; } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo); stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo); } private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) { if (!options.clusterFeedBlockEnabled) { 
return; } var calc = createResourceExhaustionCalculator(); var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo); var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo); if (!previouslyExhausted.equals(nowExhausted)) { context.log(logger, Level.FINE, () -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s", previouslyExhausted, nowExhausted)); stateChangeHandler.setStateChangedFlag(); } } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } @Override public void handleNewPublishedState(ClusterStateBundle stateBundle) { verifyInControllerThread(); ClusterState baselineState = stateBundle.getBaselineClusterState(); newStates.add(stateBundle); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; systemStateBroadcaster.handleNewClusterStates(stateBundle); if (isMaster) { storeClusterStateMetaDataToZooKeeper(stateBundle); } } private boolean maybePublishOldMetrics() { verifyInControllerThread(); if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) { ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle(); ClusterState baselineState = stateBundle.getBaselineClusterState(); metricUpdater.updateClusterStateMetrics(cluster, baselineState, 
ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; return true; } else { return false; } } private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) { try { database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion()); database.saveLatestClusterStateBundle(databaseContext, stateBundle); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indices of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); context.log(logger, Level.FINEST, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. 
*/ public void lostDatabaseConnection() { verifyInControllerThread(); boolean wasMaster = isMaster; masterElectionHandler.lostDatabaseConnection(); if (wasMaster) { dropLeadershipState(); metricUpdater.updateMasterState(false); } } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); var currentBundle = stateVersionTracker.getVersionedClusterStateBundle(); context.log(logger, Level.FINE, () -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, dbContext); convergedStates.add(currentBundle); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) { return true; } } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() { verifyInControllerThread(); selfTerminateIfConfiguredNodeIndexHasChanged(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } configuredBucketSpaces = Set.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()); stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio); communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) { ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); } eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes, databaseContext.getNodeStateUpdateListener()); database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty()); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ 
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { context.log(logger, Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort + ". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); } private void selfTerminateIfConfiguredNodeIndexHasChanged() { var newId = new FleetControllerId(options.clusterName, options.fleetControllerIndex); if (!newId.equals(context.id())) { context.log(logger, Level.WARNING, context.id() + " got new configuration for " + newId + ". 
We do not support doing this live; " + "immediately exiting now to force new configuration"); prepareShutdownEdge(); System.exit(1); } } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; final String hiddenMessage; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); hiddenMessage = ""; } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTraceAsString(e); context.log(logger, Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = metricUpdater.forWork("doNextZooKeeperTask", () -> database.doNextZooKeeperTask(databaseContext)); didWork |= metricUpdater.forWork("updateMasterElectionState", this::updateMasterElectionState); didWork |= 
metricUpdater.forWork("handleLeadershipEdgeTransitions", this::handleLeadershipEdgeTransitions); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("stateGatherer-processResponses", () -> stateGatherer.processResponses(this)); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("systemStateBroadcaster-processResponses", systemStateBroadcaster::processResponses); if ( ! isRunning()) { return; } if (isMaster) { didWork |= metricUpdater.forWork("broadcastClusterStateToEligibleNodes", this::broadcastClusterStateToEligibleNodes); systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("processAnyPendingStatusPageRequest", this::processAnyPendingStatusPageRequest); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= metricUpdater.forWork("handleRpcRequests", () -> rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this)); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask); didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks); didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } monitor.wait(didWork || waitingForCycle ? 1 : options.cycleWaitTime); if ( ! 
isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw new RuntimeException(e); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to watch master election: " + e); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { context.log(logger, Level.SEVERE, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() { if (database.hasPendingClusterStateMetaDataStore()) { context.log(logger, Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores"); return false; } boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (inMasterMoratorium) { context.log(logger, Level.INFO, currentTime < firstAllowedStateBroadcast ? 
"Master moratorium complete: all nodes have reported in" : "Master moratorium complete: timed out waiting for all nodes to report in"); firstAllowedStateBroadcast = currentTime; inMasterMoratorium = false; } sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired( databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf()); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator); return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : newStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleNewPublishedState(stateBundle); } } newStates.clear(); } } if ( ! convergedStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : convergedStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleStateConvergedInCluster(stateBundle); } } convergedStates.clear(); } } } private boolean processNextQueuedRemoteTask() { metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size()); RemoteClusterControllerTask task = remoteTasks.poll(); if (task == null) { return false; } final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext(); context.log(logger, Level.FINEST, () -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(taskContext); if (taskMayBeCompletedImmediately(task)) { context.log(logger, Level.FINEST, () -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { context.log(logger, Level.FINEST, () -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); 
/**
 * Whether a remote task can be completed right away instead of waiting for a
 * cluster state version to be ACKed by the cluster.
 */
private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
    // Completion is deferred only when the task depends on a version ACK, has not
    // already failed, and this node is the master that will publish the state.
    return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster);
}

/** Builds the context handed to remote tasks: cluster view, state bundles, master info and listeners. */
private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
    final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
    context.cluster = cluster;
    context.currentConsolidatedState = consolidatedClusterState();
    context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
    // Snapshot-style view of master election state; reads the enclosing controller's fields.
    context.masterInfo = new MasterInterface() {
        @Override public boolean isMaster() { return isMaster; }
        @Override public Integer getMaster() { return masterElectionHandler.getMaster(); }
        @Override public boolean inMasterMoratorium() { return inMasterMoratorium; }
    };
    context.nodeListener = this;
    context.slobrokListener = this;
    return context;
}

/**
 * The state version a node has effectively activated: with deferred (two-phase)
 * activation the activation ACK counts, otherwise the bundle ACK counts.
 */
private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
    return bundle.deferredActivation()
            ? nodeInfo.getClusterStateVersionActivationAcked()
            : nodeInfo.getClusterStateVersionBundleAcknowledged();
}

/**
 * Lists the nodes that have not yet ACKed at least the given state version.
 * Returns an empty list when no bundle has been broadcast yet.
 */
private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
    var bundle = systemStateBroadcaster.getClusterStateBundle();
    if (bundle == null) {
        return List.of();
    }
    return cluster.getNodeInfos().stream().
            filter(n -> effectiveActivatedStateVersion(n, bundle) < version).
            map(NodeInfo::getNode).
            collect(Collectors.toList());
}
/**
 * Builds a human-readable summary of nodes that have not converged to the given
 * version, for inclusion in task failure messages. Empty string when all have.
 */
private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
    var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
    if (nodes.isEmpty()) {
        return "";
    }
    return String.format("the following nodes have not converged to at least version %d: %s",
            taskConvergeVersion,
            stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
}

/**
 * Completes (or fails) deferred tasks at the head of the completion queue.
 * Tasks whose required version is now in sync are completed; tasks past their
 * deadline are failed with DEADLINE_EXCEEDED. Processing stops at the first task
 * that is neither — the queue is ordered, so later tasks cannot be ready earlier.
 *
 * @return true if any task was removed from the queue (i.e. work was done).
 */
private boolean completeSatisfiedVersionDependentTasks() {
    int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
    long queueSizeBefore = taskCompletionQueue.size();
    final long now = timer.getCurrentTimeInMillis();
    while (!taskCompletionQueue.isEmpty()) {
        VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
        if (publishedVersion >= taskCompletion.getMinimumVersion()) {
            context.log(logger, Level.FINE,
                    () -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing",
                            taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
            // Deadline exceeded: fail the task, including which nodes are lagging.
            var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
            context.log(logger, Level.WARNING,
                    () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)",
                            taskCompletion.getTask().getClass().getName(), details));
            taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
            taskCompletion.getTask().notifyCompleted();
            taskCompletionQueue.remove();
        } else {
            break;
        }
    }
    return (taskCompletionQueue.size() != queueSizeBefore);
}
is guaranteed to have up-to-date information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = metricUpdater.forWork("loadWantedStates", () -> database.loadWantedStates(databaseContext)); didWork |= metricUpdater.forWork("loadStartTimestamps", () -> database.loadStartTimestamps(cluster)); } didWork |= metricUpdater.forWork("updateCluster", () -> nodeLookup.updateCluster(cluster, this)); didWork |= metricUpdater.forWork("sendMessages", () -> stateGatherer.sendMessages(cluster, communicator, this)); didWork |= metricUpdater.forWork( "watchTimers", () -> stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this)); didWork |= metricUpdater.forWork("recomputeClusterStateIfRequired", this::recomputeClusterStateIfRequired); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); stateChangeHandler.setStateChangedFlag(); } } isStateGatherer = true; return didWork; } private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) { systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle)); } private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) { return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported(); } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate) .bucketSpaces(configuredBucketSpaces) .stateDeriver(createBucketSpaceStateDeriver()) .deferredActivation(options.enableTwoPhaseClusterStateActivation) .feedBlock(createResourceExhaustionCalculator() .inferContentClusterFeedBlockOrNull(cluster.getNodeInfos())) .deriveAndBuild(); stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle); invokeCandidateStateListeners(candidateBundle); final long timeNowMs = timer.getCurrentTimeInMillis(); if (hasPassedFirstStateBroadcastTimePoint(timeNowMs) && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) { final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs); 
handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } private ClusterStateDeriver createBucketSpaceStateDeriver() { if (options.clusterHasGlobalDocumentTypes) { return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(), createDefaultSpaceMaintenanceTransitionConstraint()); } else { return createIdentityClonedBucketSpaceStateDeriver(); } } private ResourceExhaustionCalculator createResourceExhaustionCalculator() { return new ResourceExhaustionCalculator( options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit, stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(), options.clusterFeedBlockNoiseLevel); } private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() { return (state, space) -> state.clone(); } private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() { AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle() .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState()); return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState()); } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a 
dependency on the provided version argument. Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. */ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis(); for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { context.log(logger, Level.INFO, task + " will be completed at version " + completeAtVersion); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState, final ClusterStateBundle toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs) .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs())); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean atFirstClusterStateSendTimeEdge() { if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) { return false; } return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis()); } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged() || atFirstClusterStateSendTimeEdge(); } private boolean handleLeadershipEdgeTransitions() { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { stateChangeHandler.setStateChangedFlag(); systemStateBroadcaster.resetBroadcastedClusterStateBundle(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); ClusterStateBundle previousBundle = database.getLatestClusterStateBundle(); database.loadStartTimestamps(cluster); database.loadWantedStates(databaseContext); context.log(logger, Level.INFO, () -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle)); stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
/**
 * Clears all master-only state when leadership is lost: logs the event, blocks
 * further broadcasts and fails all tasks waiting on a state version.
 */
private void dropLeadershipState() {
    if (isMaster) {
        eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION,
                "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
        // Long.MAX_VALUE disables state broadcasting until a new mastership sets it again.
        firstAllowedStateBroadcast = Long.MAX_VALUE;
        failAllVersionDependentTasks();
    }
    wantedStateChanged = false;
    isMaster = false;
    inMasterMoratorium = false;
}

/**
 * Main controller loop: runs tick() until stopped. Any non-interrupt throwable is
 * fatal and terminates the JVM; shutdown bookkeeping always runs in the finally.
 */
@Override
public void run() {
    controllerThreadId = Thread.currentThread().getId();
    context.log(logger, Level.INFO, "Starting tick loop");
    try {
        processingCycle = true;
        while (isRunning()) {
            tick();
        }
        context.log(logger, Level.INFO, "Tick loop stopped");
    } catch (InterruptedException e) {
        // Interrupt is the normal stop signal for this thread; log and fall through to shutdown.
        context.log(logger, Level.INFO, "Event thread stopped by interrupt exception: ", e);
    } catch (Throwable t) {
        t.printStackTrace();
        context.log(logger, Level.SEVERE, "Fatal error killed fleet controller", t);
        synchronized (monitor) { running.set(false); }
        // Deliberate hard exit: the controller cannot safely continue after a fatal error.
        System.exit(1);
    } finally {
        prepareShutdownEdge();
    }
}

/** Marks the controller as stopped, fails pending tasks and wakes all waiters. */
private void prepareShutdownEdge() {
    running.set(false);
    failAllVersionDependentTasks();
    synchronized (monitor) {
        monitor.notifyAll();
    }
}
cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public SlobrokListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeListener getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(Duration timeout) { Instant endTime = Instant.now().plus(timeout); synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (Instant.now().isAfter(endTime)) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeout); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. 
/**
 * Blocks until at least {@code nodeCount} nodes have acknowledged cluster state
 * {@code version} or higher, or throws IllegalStateException on timeout.
 * Not fully thread-safe with respect to a concurrently changing cluster; per the
 * original note this is only used from unit tests, where that is acceptable.
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, Duration timeout) throws InterruptedException {
    Instant endTime = Instant.now().plus(timeout);
    synchronized (monitor) {
        while (true) {
            long acked = cluster.getNodeInfos().stream()
                    .filter(node -> node.getClusterStateVersionBundleAcknowledged() >= version)
                    .count();
            if (acked >= nodeCount) {
                context.log(logger, Level.INFO, acked + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            Duration remaining = Duration.between(Instant.now(), endTime);
            if (remaining.isNegative()) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version
                        + " within timeout of " + timeout);
            }
            monitor.wait(10);
        }
    }
}

/** True while the ZooKeeper database connection is open. */
public boolean hasZookeeperConnection() { return !database.isClosed(); }

/** Number of Slobrok mirror updates seen so far. */
public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }

public ContentCluster getCluster() { return cluster; }

public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }

public EventLog getEventLog() { return eventLog; }
}
class FleetController implements NodeListener, SlobrokListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static final Logger logger = Logger.getLogger(FleetController.class.getName()); private final FleetControllerContext context; private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private final AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long lastMetricUpdateCycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<ClusterStateBundle> newStates = new ArrayList<>(); private final List<ClusterStateBundle> convergedStates = new ArrayList<>(); private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean inMasterMoratorium = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; 
private long tickStartTime = Long.MAX_VALUE; private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>(); private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private Set<String> configuredBucketSpaces = Collections.emptySet(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public FleetControllerOptions getOptions() { return options; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(FleetControllerContext context, Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) { context.log(logger, Level.INFO, "Created"); this.context = context; this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler()); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( 
/**
 * Factory wiring up a production FleetController: real timer, RPC communicator,
 * ZooKeeper-backed database, Slobrok node lookup and master election, then starts
 * the controller thread before returning.
 *
 * @throws Exception if any collaborator fails to initialize
 */
public static FleetController create(FleetControllerOptions options,
                                     StatusPageServerInterface statusPageServer,
                                     MetricReporter metricReporter) throws Exception {
    var context = new FleetControllerContextImpl(options);
    var timer = new RealTimer();
    var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName);
    var log = new EventLog(timer, metricUpdater);
    var cluster = new ContentCluster(options.clusterName, options.nodes, options.storageDistribution);
    var stateGatherer = new NodeStateGatherer(timer, timer, log);
    var communicator = new RPCCommunicator(
            RPCCommunicator.createRealSupervisor(),
            timer,
            options.fleetControllerIndex,
            options.nodeStateRequestTimeoutMS,
            options.nodeStateRequestTimeoutEarliestPercentage,
            options.nodeStateRequestTimeoutLatestPercentage,
            options.nodeStateRequestRoundTripTimeMaxSeconds);
    var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress, timer);
    var lookUp = new SlobrokClient(context, timer);
    var stateGenerator = new StateChangeHandler(context, timer, log);
    var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer);
    var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
    // null: no RPC server in this wiring; the controller tolerates its absence.
    var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator,
            statusPageServer, null, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler,
            metricUpdater, options);
    controller.start();
    return controller;
}

/** Starts the controller's event thread (runs {@link #run()}). */
public void start() {
    runner = new Thread(this);
    runner.start();
}

/** The monitor object used for all controller-state synchronization. */
public Object getMonitor() { return monitor; }

/** True until shutdown has been initiated. */
public boolean isRunning() {
    return running.get();
}
/** The most recently broadcast baseline cluster state. */
public ClusterState getClusterState() {
    synchronized (monitor) { return systemStateBroadcaster.getClusterState(); }
}

/** The most recently broadcast cluster state bundle (baseline plus per-space states). */
public ClusterStateBundle getClusterStateBundle() {
    synchronized (monitor) { return systemStateBroadcaster.getClusterStateBundle(); }
}

/** Queues a remote task for execution by the controller thread on a later tick. */
public void schedule(RemoteClusterControllerTask task) {
    synchronized (monitor) {
        context.log(logger, Level.FINE, "Scheduled remote task " + task.getClass().getName() + " for execution");
        remoteTasks.add(task);
    }
}

/** Used for unit testing. Registers a listener and immediately replays current state to it. */
public void addSystemStateListener(SystemStateListener listener) {
    systemStateListeners.add(listener);
    // Replay the current published state so the listener does not miss it.
    com.yahoo.vdslib.state.ClusterState state = getSystemState();
    if (state == null) {
        throw new NullPointerException("Cluster state should never be null at this point");
    }
    listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
    ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
    if (convergedState != null) {
        listener.handleStateConvergedInCluster(convergedState);
    }
}

/** A defensive clone of the current options. */
public FleetControllerOptions getOptions() {
    synchronized(monitor) { return options.clone(); }
}

/**
 * The last state this node reported, or throws IllegalStateException if the node
 * is unknown to the cluster.
 */
public NodeState getReportedNodeState(Node n) {
    synchronized(monitor) {
        NodeInfo node = cluster.getNodeInfo(n);
        if (node == null) {
            throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
        }
        return node.getReportedState();
    }
}

/** The administratively wanted state for the given node. */
public NodeState getWantedNodeState(Node n) {
    synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); }
}

/** The current versioned cluster state as tracked by the version tracker. */
public com.yahoo.vdslib.state.ClusterState getSystemState() {
    synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); }
}

/** Port of the RPC server; NPEs if the controller was wired without one. */
public int getRpcPort() { return rpcServer.getPort(); }
/**
 * Hands a new options object to the controller; it takes effect on a later tick
 * (see switchToNewConfig). The options' controller id must match this instance's.
 */
public void updateOptions(FleetControllerOptions options) {
    var newId = FleetControllerId.fromOptions(options);
    synchronized(monitor) {
        assert newId.equals(context.id());
        context.log(logger, Level.INFO, "FleetController has new options");
        nextOptions = options.clone();
        monitor.notifyAll();
    }
}

/** Asserts the caller is the controller thread (once that thread id is known). */
private void verifyInControllerThread() {
    if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
        throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
    }
}

/** The latest candidate (not necessarily published) cluster state. */
private ClusterState latestCandidateClusterState() {
    return stateVersionTracker.getLatestCandidateState().getClusterState();
}

/** Routes a freshly reported node state into the state change handler. */
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
}

/** Records a new wanted state for a node; flags wanted state for ZooKeeper persistence. */
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
    verifyInControllerThread();
    wantedStateChanged = true;
    stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}

/** Marks wanted state as changed when a node is removed, so persisted state is refreshed. */
@Override
public void handleRemovedNode(Node node) {
    verifyInControllerThread();
    wantedStateChanged = true;
}

/** Processes updated host info, possibly triggering a feed-block driven state recomputation. */
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
    verifyInControllerThread();
    triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
    stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
}
/** Forwards a newly discovered node to the state change handler. */
@Override
public void handleNewNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewNode(node);
}

/** Forwards a node that disappeared (e.g. from Slobrok) to the state change handler. */
@Override
public void handleMissingNode(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}

/** Forwards a node's new RPC address to the state change handler. */
@Override
public void handleNewRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleNewRpcAddress(node);
}

/** Forwards a node whose previous RPC address reappeared to the state change handler. */
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
    verifyInControllerThread();
    stateChangeHandler.handleReturnedRpcAddress(node);
}

/**
 * Reacts to a newly published state bundle: queues it for listeners, updates
 * metrics, hands it to the broadcaster and, when master, persists its metadata
 * to ZooKeeper.
 */
@Override
public void handleNewPublishedState(ClusterStateBundle stateBundle) {
    verifyInControllerThread();
    ClusterState baselineState = stateBundle.getBaselineClusterState();
    newStates.add(stateBundle);
    metricUpdater.updateClusterStateMetrics(cluster, baselineState,
            ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
    lastMetricUpdateCycleCount = cycleCount;
    systemStateBroadcaster.handleNewClusterStates(stateBundle);
    // Only the master may mutate shared ZooKeeper state.
    if (isMaster) {
        storeClusterStateMetaDataToZooKeeper(stateBundle);
    }
}
ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; return true; } else { return false; } } private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) { try { database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion()); database.saveLatestClusterStateBundle(databaseContext, stateBundle); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indices of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); context.log(logger, Level.FINEST, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. 
*/ public void lostDatabaseConnection() { verifyInControllerThread(); boolean wasMaster = isMaster; masterElectionHandler.lostDatabaseConnection(); if (wasMaster) { dropLeadershipState(); metricUpdater.updateMasterState(false); } } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); var currentBundle = stateVersionTracker.getVersionedClusterStateBundle(); context.log(logger, Level.FINE, () -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, dbContext); convergedStates.add(currentBundle); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) { return true; } } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() { verifyInControllerThread(); selfTerminateIfConfiguredNodeIndexHasChanged(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } configuredBucketSpaces = Set.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()); stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio); communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) { ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); } eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes, databaseContext.getNodeStateUpdateListener()); database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty()); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ 
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { context.log(logger, Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort + ". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); } private void selfTerminateIfConfiguredNodeIndexHasChanged() { var newId = new FleetControllerId(options.clusterName, options.fleetControllerIndex); if (!newId.equals(context.id())) { context.log(logger, Level.WARNING, context.id() + " got new configuration for " + newId + ". 
We do not support doing this live; " + "immediately exiting now to force new configuration"); prepareShutdownEdge(); System.exit(1); } } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; final String hiddenMessage; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); hiddenMessage = ""; } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTraceAsString(e); context.log(logger, Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = metricUpdater.forWork("doNextZooKeeperTask", () -> database.doNextZooKeeperTask(databaseContext)); didWork |= metricUpdater.forWork("updateMasterElectionState", this::updateMasterElectionState); didWork |= 
metricUpdater.forWork("handleLeadershipEdgeTransitions", this::handleLeadershipEdgeTransitions); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("stateGatherer-processResponses", () -> stateGatherer.processResponses(this)); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("systemStateBroadcaster-processResponses", systemStateBroadcaster::processResponses); if ( ! isRunning()) { return; } if (isMaster) { didWork |= metricUpdater.forWork("broadcastClusterStateToEligibleNodes", this::broadcastClusterStateToEligibleNodes); systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("processAnyPendingStatusPageRequest", this::processAnyPendingStatusPageRequest); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= metricUpdater.forWork("handleRpcRequests", () -> rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this)); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask); didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks); didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } monitor.wait(didWork || waitingForCycle ? 1 : options.cycleWaitTime); if ( ! 
isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw new RuntimeException(e); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to watch master election: " + e); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { context.log(logger, Level.SEVERE, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() { if (database.hasPendingClusterStateMetaDataStore()) { context.log(logger, Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores"); return false; } boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (inMasterMoratorium) { context.log(logger, Level.INFO, currentTime < firstAllowedStateBroadcast ? 
"Master moratorium complete: all nodes have reported in" : "Master moratorium complete: timed out waiting for all nodes to report in"); firstAllowedStateBroadcast = currentTime; inMasterMoratorium = false; } sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired( databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf()); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator); return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : newStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleNewPublishedState(stateBundle); } } newStates.clear(); } } if ( ! convergedStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : convergedStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleStateConvergedInCluster(stateBundle); } } convergedStates.clear(); } } } private boolean processNextQueuedRemoteTask() { metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size()); RemoteClusterControllerTask task = remoteTasks.poll(); if (task == null) { return false; } final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext(); context.log(logger, Level.FINEST, () -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(taskContext); if (taskMayBeCompletedImmediately(task)) { context.log(logger, Level.FINEST, () -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { context.log(logger, Level.FINEST, () -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); 
tasksPendingStateRecompute.add(task); } return true; } private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) { return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster); } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentConsolidatedState = consolidatedClusterState(); context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle(); context.masterInfo = new MasterInterface() { @Override public boolean isMaster() { return isMaster; } @Override public Integer getMaster() { return masterElectionHandler.getMaster(); } @Override public boolean inMasterMoratorium() { return inMasterMoratorium; } }; context.nodeListener = this; context.slobrokListener = this; return context; } private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) { return bundle.deferredActivation() ? nodeInfo.getClusterStateVersionActivationAcked() : nodeInfo.getClusterStateVersionBundleAcknowledged(); } private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) { var bundle = systemStateBroadcaster.getClusterStateBundle(); if (bundle == null) { return List.of(); } return cluster.getNodeInfos().stream(). filter(n -> effectiveActivatedStateVersion(n, bundle) < version). map(NodeInfo::getNode). collect(Collectors.toList()); } private static <E> String stringifyListWithLimits(List<E> list, int limit) { if (list.size() > limit) { var sub = list.subList(0, limit); return String.format("%s (... 
and %d more)", sub.stream().map(E::toString).collect(Collectors.joining(", ")), list.size() - limit); } else { return list.stream().map(E::toString).collect(Collectors.joining(", ")); } } private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) { var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion); if (nodes.isEmpty()) { return ""; } return String.format("the following nodes have not converged to at least version %d: %s", taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages)); } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); final long now = timer.getCurrentTimeInMillis(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { context.log(logger, Level.FINE, () -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else if (taskCompletion.getDeadlineTimePointMs() <= now) { var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion()); context.log(logger, Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)", taskCompletion.getTask().getClass().getName(), details)); taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state 
is guaranteed to have up-to-date information on which nodes are * up or down even when the whole cluster is down. The regular, published cluster state is not * normally updated to reflect node events when the cluster is down. */ ClusterState consolidatedClusterState() { final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); if (publishedState.getClusterState() == State.UP) { return publishedState; } final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); current.setVersion(publishedState.getVersion()); return current; } /* System test observations: - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling - long time before content node state convergence (though this seems to be the case for legacy impl as well) */ private boolean resyncLocallyCachedState() { boolean didWork = false; if ( ! isMaster && cycleCount % 100 == 0) { didWork = metricUpdater.forWork("loadWantedStates", () -> database.loadWantedStates(databaseContext)); didWork |= metricUpdater.forWork("loadStartTimestamps", () -> database.loadStartTimestamps(cluster)); } didWork |= metricUpdater.forWork("updateCluster", () -> nodeLookup.updateCluster(cluster, this)); didWork |= metricUpdater.forWork("sendMessages", () -> stateGatherer.sendMessages(cluster, communicator, this)); didWork |= metricUpdater.forWork( "watchTimers", () -> stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this)); didWork |= metricUpdater.forWork("recomputeClusterStateIfRequired", this::recomputeClusterStateIfRequired); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); stateChangeHandler.setStateChangedFlag(); } } isStateGatherer = true; return didWork; } private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) { systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle)); } private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) { return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported(); } private boolean recomputeClusterStateIfRequired() { boolean stateWasChanged = false; if (mustRecomputeCandidateClusterState()) { stateChangeHandler.unsetStateChangedFlag(); final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate) .bucketSpaces(configuredBucketSpaces) .stateDeriver(createBucketSpaceStateDeriver()) .deferredActivation(options.enableTwoPhaseClusterStateActivation) .feedBlock(createResourceExhaustionCalculator() .inferContentClusterFeedBlockOrNull(cluster.getNodeInfos())) .deriveAndBuild(); stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle); invokeCandidateStateListeners(candidateBundle); final long timeNowMs = timer.getCurrentTimeInMillis(); if (hasPassedFirstStateBroadcastTimePoint(timeNowMs) && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) { final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle(); stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs); 
handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle()); stateWasChanged = true; } } /* * This works transparently for tasks that end up changing the current cluster state (i.e. * requiring a new state to be published) and for those whose changes are no-ops (because * the changes they request are already part of the current state). In the former case the * tasks will depend on the version that was generated based upon them. In the latter case * the tasks will depend on the version that is already published (or in the process of * being published). */ scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion()); return stateWasChanged; } private ClusterStateDeriver createBucketSpaceStateDeriver() { if (options.clusterHasGlobalDocumentTypes) { return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(), createDefaultSpaceMaintenanceTransitionConstraint()); } else { return createIdentityClonedBucketSpaceStateDeriver(); } } private ResourceExhaustionCalculator createResourceExhaustionCalculator() { return new ResourceExhaustionCalculator( options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit, stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(), options.clusterFeedBlockNoiseLevel); } private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() { return (state, space) -> state.clone(); } private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() { AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle() .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState()); return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState()); } /** * Move tasks that are dependent on the most recently generated state being published into * a completion queue with a 
dependency on the provided version argument. Once that version * has been ACKed by all distributors in the system, those tasks will be marked as completed. */ private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) { final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis(); for (RemoteClusterControllerTask task : tasksPendingStateRecompute) { context.log(logger, Level.INFO, task + " will be completed at version " + completeAtVersion); taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs)); } tasksPendingStateRecompute.clear(); } private AnnotatedClusterState computeCurrentAnnotatedState() { ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) .cluster(cluster) .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); return ClusterStateGenerator.generatedStateFrom(params); } private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState, final ClusterStateBundle toState, final long timeNowMs) { final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params() .cluster(cluster) .fromState(fromState) .toState(toState) .currentTimeMs(timeNowMs) .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs())); for (Event event : deltaEvents) { eventLog.add(event, isMaster); } emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState()); } private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". 
Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster); if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster); } } private boolean atFirstClusterStateSendTimeEdge() { if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) { return false; } return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis()); } private boolean mustRecomputeCandidateClusterState() { return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged() || atFirstClusterStateSendTimeEdge(); } private boolean handleLeadershipEdgeTransitions() { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { stateChangeHandler.setStateChangedFlag(); systemStateBroadcaster.resetBroadcastedClusterStateBundle(); stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); ClusterStateBundle previousBundle = database.getLatestClusterStateBundle(); database.loadStartTimestamps(cluster); database.loadWantedStates(databaseContext); context.log(logger, Level.INFO, () -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle)); stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; isMaster = true; inMasterMoratorium = true; context.log(logger, Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + "."); didWork = true; } if (wantedStateChanged) { didWork |= database.saveWantedStates(databaseContext); wantedStateChanged = false; } } else { dropLeadershipState(); } metricUpdater.updateMasterState(isMaster); return didWork; } private void dropLeadershipState() { if (isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis())); firstAllowedStateBroadcast = Long.MAX_VALUE; failAllVersionDependentTasks(); } wantedStateChanged = false; isMaster = false; inMasterMoratorium = false; } @Override public void run() { controllerThreadId = Thread.currentThread().getId(); context.log(logger, Level.INFO, "Starting tick loop"); try { processingCycle = true; while (isRunning()) { tick(); } context.log(logger, Level.INFO, "Tick loop stopped"); } catch (InterruptedException e) { context.log(logger, Level.INFO, "Event thread stopped by interrupt exception: ", e); } catch (Throwable t) { t.printStackTrace(); context.log(logger, Level.SEVERE, "Fatal error killed fleet controller", t); synchronized (monitor) { running.set(false); } System.exit(1); } finally { prepareShutdownEdge(); } } private void prepareShutdownEdge() { running.set(false); failAllVersionDependentTasks(); synchronized (monitor) { monitor.notifyAll(); } } public DatabaseHandler.DatabaseContext databaseContext = new DatabaseHandler.DatabaseContext() { @Override public ContentCluster getCluster() { return 
cluster; } @Override public FleetController getFleetController() { return FleetController.this; } @Override public SlobrokListener getNodeAddedOrRemovedListener() { return FleetController.this; } @Override public NodeListener getNodeStateUpdateListener() { return FleetController.this; } }; public void waitForCompleteCycle(Duration timeout) { Instant endTime = Instant.now().plus(timeout); synchronized (monitor) { long wantedCycle = cycleCount + (processingCycle ? 2 : 1); waitingForCycle = true; try{ while (cycleCount < wantedCycle) { if (Instant.now().isAfter(endTime)) throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeout); if ( !isRunning() ) throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles"); try{ monitor.wait(100); } catch (InterruptedException e) {} } } finally { waitingForCycle = false; } } } /** * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed. * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce * live performance to remove a non-problem. 
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, Duration timeout) throws InterruptedException {
    Instant endTime = Instant.now().plus(timeout);
    synchronized (monitor) {
        while (true) {
            // Count nodes whose acknowledged cluster-state-bundle version is at or above the wanted version.
            int ackedNodes = 0;
            for (NodeInfo node : cluster.getNodeInfos()) {
                if (node.getClusterStateVersionBundleAcknowledged() >= version) {
                    ++ackedNodes;
                }
            }
            if (ackedNodes >= nodeCount) {
                context.log(logger, Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            // Deadline check happens only after a count pass, so a final recount is always made before giving up.
            if (Instant.now().isAfter(endTime)) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version
                        + " within timeout of " + timeout);
            }
            // Poll every 10 ms while holding the monitor; wait() releases it so the controller thread can progress.
            monitor.wait(10);
        }
    }
}

/** True as long as the ZooKeeper database handle has not been closed. */
public boolean hasZookeeperConnection() { return !database.isClosed(); }

// NOTE(review): unconditional cast assumes nodeLookup is always a SlobrokClient here -- confirm against construction site.
public int getSlobrokMirrorUpdates() { return ((SlobrokClient) nodeLookup).getMirror().updates(); }

public ContentCluster getCluster() { return cluster; }

public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }

public EventLog getEventLog() { return eventLog; }

}
/**
 * Blocks until at least {@code nodeCount} nodes have acknowledged a cluster state bundle whose
 * version is {@code version} or higher, polling the cluster while holding the monitor lock.
 *
 * @throws IllegalStateException if the condition is not met before {@code timeout} elapses
 * @throws InterruptedException if the waiting thread is interrupted
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, Duration timeout) throws InterruptedException {
    Instant endTime = Instant.now().plus(timeout);
    synchronized (monitor) {
        while (true) {
            long ackedNodes = cluster.getNodeInfos().stream()
                    .filter(node -> node.getClusterStateVersionBundleAcknowledged() >= version)
                    .count();
            if (ackedNodes >= nodeCount) {
                context.log(logger, Level.INFO, ackedNodes + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            Duration remainingTime = Duration.between(Instant.now(), endTime);
            if (remainingTime.isNegative()) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout);
            }
            monitor.wait(10);
        }
    }
}
/**
 * Waits for {@code nodeCount} nodes to have acknowledged cluster state version {@code version}
 * (or any later version), failing with {@link IllegalStateException} once {@code timeout} has passed.
 *
 * @throws InterruptedException if interrupted while waiting on the monitor
 */
public void waitForNodesHavingSystemStateVersionEqualToOrAbove(int version, int nodeCount, Duration timeout) throws InterruptedException {
    Instant deadline = Instant.now().plus(timeout);
    synchronized (monitor) {
        for (;;) {
            // Tally nodes whose acknowledged bundle version already satisfies the requirement.
            int nodesAtOrAbove = 0;
            for (NodeInfo nodeInfo : cluster.getNodeInfos()) {
                if (nodeInfo.getClusterStateVersionBundleAcknowledged() >= version) {
                    nodesAtOrAbove++;
                }
            }
            if (nodesAtOrAbove >= nodeCount) {
                context.log(logger, Level.INFO, nodesAtOrAbove + " nodes now have acked system state " + version + " or higher.");
                return;
            }
            if (Instant.now().isAfter(deadline)) {
                throw new IllegalStateException("Did not get " + nodeCount + " nodes to system state " + version + " within timeout of " + timeout);
            }
            monitor.wait(10);
        }
    }
}
class FleetController implements NodeListener, SlobrokListener, SystemStateListener, Runnable, RemoteClusterControllerTaskScheduler { private static final Logger logger = Logger.getLogger(FleetController.class.getName()); private final FleetControllerContext context; private final Timer timer; private final Object monitor; private final EventLog eventLog; private final NodeLookup nodeLookup; private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster systemStateBroadcaster; private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; private final MasterElectionHandler masterElectionHandler; private Thread runner = null; private final AtomicBoolean running = new AtomicBoolean(true); private FleetControllerOptions options; private FleetControllerOptions nextOptions; private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>(); private boolean processingCycle = false; private boolean wantedStateChanged = false; private long cycleCount = 0; private long lastMetricUpdateCycleCount = 0; private long nextStateSendTime = 0; private Long controllerThreadId = null; private boolean waitingForCycle = false; private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter(); private final List<ClusterStateBundle> newStates = new ArrayList<>(); private final List<ClusterStateBundle> convergedStates = new ArrayList<>(); private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; private boolean inMasterMoratorium = false; private boolean isStateGatherer = false; private long firstAllowedStateBroadcast = Long.MAX_VALUE; 
private long tickStartTime = Long.MAX_VALUE; private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>(); private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>(); private Set<String> configuredBucketSpaces = Collections.emptySet(); private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override public FleetControllerOptions getOptions() { return options; } @Override public ContentCluster getCluster() { return cluster; } }; public FleetController(FleetControllerContext context, Timer timer, EventLog eventLog, ContentCluster cluster, NodeStateGatherer nodeStateGatherer, Communicator communicator, StatusPageServerInterface statusPage, RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, FleetControllerOptions options) { context.log(logger, Level.INFO, "Created"); this.context = context; this.timer = timer; this.monitor = timer; this.eventLog = eventLog; this.options = options; this.nodeLookup = nodeLookup; this.cluster = cluster; this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; this.rpcServer = server; this.masterElectionHandler = masterElectionHandler; this.statusRequestRouter.addHandler( "^/node=([a-z]+)\\.(\\d+)$", new LegacyNodePageRequestHandler(timer, eventLog, cluster)); this.statusRequestRouter.addHandler( "^/state.*", new NodeHealthRequestHandler()); this.statusRequestRouter.addHandler( "^/clusterstate", new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( 
"^/$", new LegacyIndexPageRequestHandler(timer, cluster, masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); } public static FleetController create(FleetControllerOptions options, StatusPageServerInterface statusPageServer, MetricReporter metricReporter) throws Exception { var context = new FleetControllerContextImpl(options); var timer = new RealTimer(); var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName); var log = new EventLog(timer, metricUpdater); var cluster = new ContentCluster( options.clusterName, options.nodes, options.storageDistribution); var stateGatherer = new NodeStateGatherer(timer, timer, log); var communicator = new RPCCommunicator( RPCCommunicator.createRealSupervisor(), timer, options.fleetControllerIndex, options.nodeStateRequestTimeoutMS, options.nodeStateRequestTimeoutEarliestPercentage, options.nodeStateRequestTimeoutLatestPercentage, options.nodeStateRequestRoundTripTimeMaxSeconds); var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress, timer); var lookUp = new SlobrokClient(context, timer); var stateGenerator = new StateChangeHandler(context, timer, log); var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer); var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer); var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); controller.start(); return controller; } public void start() { runner = new Thread(this); runner.start(); } public Object getMonitor() { return monitor; } public boolean isRunning() { return running.get(); } public boolean isMaster() { synchronized (monitor) { return isMaster; 
} } public ClusterState getClusterState() { synchronized (monitor) { return systemStateBroadcaster.getClusterState(); } } public ClusterStateBundle getClusterStateBundle() { synchronized (monitor) { return systemStateBroadcaster.getClusterStateBundle(); } } public void schedule(RemoteClusterControllerTask task) { synchronized (monitor) { context.log(logger, Level.FINE, "Scheduled remote task " + task.getClass().getName() + " for execution"); remoteTasks.add(task); } } /** Used for unit testing. */ public void addSystemStateListener(SystemStateListener listener) { systemStateListeners.add(listener); com.yahoo.vdslib.state.ClusterState state = getSystemState(); if (state == null) { throw new NullPointerException("Cluster state should never be null at this point"); } listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state))); ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged(); if (convergedState != null) { listener.handleStateConvergedInCluster(convergedState); } } public FleetControllerOptions getOptions() { synchronized(monitor) { return options.clone(); } } public NodeState getReportedNodeState(Node n) { synchronized(monitor) { NodeInfo node = cluster.getNodeInfo(n); if (node == null) { throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster); } return node.getReportedState(); } } public NodeState getWantedNodeState(Node n) { synchronized(monitor) { return cluster.getNodeInfo(n).getWantedState(); } } public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { return stateVersionTracker.getVersionedClusterState(); } } public int getRpcPort() { return rpcServer.getPort(); } public void shutdown() throws InterruptedException, java.io.IOException { if (runner != null && isRunning()) { context.log(logger, Level.INFO, "Joining event thread."); running.set(false); synchronized(monitor) { monitor.notifyAll(); } 
runner.join(); } context.log(logger, Level.INFO, "FleetController done shutting down event thread."); controllerThreadId = Thread.currentThread().getId(); database.shutdown(databaseContext); if (statusPageServer != null) { statusPageServer.shutdown(); } if (rpcServer != null) { rpcServer.shutdown(); } communicator.shutdown(); nodeLookup.shutdown(); } public void updateOptions(FleetControllerOptions options) { var newId = FleetControllerId.fromOptions(options); synchronized(monitor) { assert newId.equals(context.id()); context.log(logger, Level.INFO, "FleetController has new options"); nextOptions = options.clone(); monitor.notifyAll(); } } private void verifyInControllerThread() { if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) { throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen."); } } private ClusterState latestCandidateClusterState() { return stateVersionTracker.getLatestCandidateState().getClusterState(); } @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleRemovedNode(Node node) { verifyInControllerThread(); wantedStateChanged = true; } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo); stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo); } private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) { if (!options.clusterFeedBlockEnabled) { 
return; } var calc = createResourceExhaustionCalculator(); var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo); var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo); if (!previouslyExhausted.equals(nowExhausted)) { context.log(logger, Level.FINE, () -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s", previouslyExhausted, nowExhausted)); stateChangeHandler.setStateChangedFlag(); } } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); stateChangeHandler.handleReturnedRpcAddress(node); } @Override public void handleNewPublishedState(ClusterStateBundle stateBundle) { verifyInControllerThread(); ClusterState baselineState = stateBundle.getBaselineClusterState(); newStates.add(stateBundle); metricUpdater.updateClusterStateMetrics(cluster, baselineState, ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; systemStateBroadcaster.handleNewClusterStates(stateBundle); if (isMaster) { storeClusterStateMetaDataToZooKeeper(stateBundle); } } private boolean maybePublishOldMetrics() { verifyInControllerThread(); if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) { ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle(); ClusterState baselineState = stateBundle.getBaselineClusterState(); metricUpdater.updateClusterStateMetrics(cluster, baselineState, 
ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock())); lastMetricUpdateCycleCount = cycleCount; return true; } else { return false; } } private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) { try { database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion()); database.saveLatestClusterStateBundle(databaseContext, stateBundle); } catch (InterruptedException e) { throw new RuntimeException("ZooKeeper write interrupted", e); } } /** * This function gives data of the current state in master election. * The keys in the given map are indices of fleet controllers. * The values are what fleetcontroller that fleetcontroller wants to * become master. * * If more than half the fleetcontrollers want a node to be master and * that node also wants itself as master, that node is the single master. * If this condition is not met, there is currently no master. */ public void handleFleetData(Map<Integer, Integer> data) { verifyInControllerThread(); context.log(logger, Level.FINEST, "Sending fleet data event on to master election handler"); metricUpdater.updateMasterElectionMetrics(data); masterElectionHandler.handleFleetData(data); } /** * Called when we can no longer contact database. 
*/ public void lostDatabaseConnection() { verifyInControllerThread(); boolean wasMaster = isMaster; masterElectionHandler.lostDatabaseConnection(); if (wasMaster) { dropLeadershipState(); metricUpdater.updateMasterState(false); } } private void failAllVersionDependentTasks() { tasksPendingStateRecompute.forEach(task -> { task.handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.notifyCompleted(); }); tasksPendingStateRecompute.clear(); taskCompletionQueue.forEach(task -> { task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST)); task.getTask().notifyCompleted(); }); taskCompletionQueue.clear(); } /** Called when all distributors have acked newest cluster state version. */ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext) throws InterruptedException { Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); var currentBundle = stateVersionTracker.getVersionedClusterStateBundle(); context.log(logger, Level.FINE, () -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion())); stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, dbContext); convergedStates.add(currentBundle); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { if (newNodes.size() != cluster.getConfiguredNodes().size()) return true; if (! 
cluster.getConfiguredNodes().values().containsAll(newNodes)) return true; for (ConfiguredNode node : newNodes) { if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) { return true; } } return false; } /** This is called when the options field has been set to a new set of options */ private void propagateOptions() { verifyInControllerThread(); selfTerminateIfConfiguredNodeIndexHasChanged(); if (changesConfiguredNodeSet(options.nodes)) { cluster.setSlobrokGenerationCount(0); } configuredBucketSpaces = Set.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()); stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio); communicator.propagateOptions(options); if (nodeLookup instanceof SlobrokClient) { ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs); } eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize); cluster.setPollingFrequency(options.statePollingFrequency); cluster.setDistribution(options.storageDistribution); cluster.setNodes(options.nodes, databaseContext.getNodeStateUpdateListener()); database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext); database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); stateChangeHandler.reconfigureFromOptions(options); stateChangeHandler.setStateChangedFlag(); masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty()); if (rpcServer != null) { rpcServer.setMasterElectionHandler(masterElectionHandler); try{ 
rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort); } catch (ListenFailedException e) { context.log(logger, Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort + ". This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage()); } } if (statusPageServer != null) { try{ statusPageServer.setPort(options.httpPort); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage()); } } long currentTime = timer.getCurrentTimeInMillis(); nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime); } private void selfTerminateIfConfiguredNodeIndexHasChanged() { var newId = new FleetControllerId(options.clusterName, options.fleetControllerIndex); if (!newId.equals(context.id())) { context.log(logger, Level.WARNING, context.id() + " got new configuration for " + newId + ". 
We do not support doing this live; " + "immediately exiting now to force new configuration"); prepareShutdownEdge(); System.exit(1); } } public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) { verifyInControllerThread(); StatusPageResponse.ResponseCode responseCode; String message; final String hiddenMessage; try { StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest); if (handler == null) { throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath()); } return handler.handle(httpRequest); } catch (FileNotFoundException e) { responseCode = StatusPageResponse.ResponseCode.NOT_FOUND; message = e.getMessage(); hiddenMessage = ""; } catch (Exception e) { responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR; message = "Internal Server Error"; hiddenMessage = ExceptionUtils.getStackTraceAsString(e); context.log(logger, Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() + ": " + hiddenMessage); } TimeZone tz = TimeZone.getTimeZone("UTC"); long currentTime = timer.getCurrentTimeInMillis(); StatusPageResponse response = new StatusPageResponse(); StringBuilder content = new StringBuilder(); response.setContentType("text/html"); response.setResponseCode(responseCode); content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n"); content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>"); response.writeHtmlHeader(content, message); response.writeHtmlFooter(content, hiddenMessage); response.writeContent(content.toString()); return response; } public void tick() throws Exception { synchronized (monitor) { boolean didWork; didWork = metricUpdater.forWork("doNextZooKeeperTask", () -> database.doNextZooKeeperTask(databaseContext)); didWork |= metricUpdater.forWork("updateMasterElectionState", this::updateMasterElectionState); didWork |= 
metricUpdater.forWork("handleLeadershipEdgeTransitions", this::handleLeadershipEdgeTransitions); stateChangeHandler.setMaster(isMaster); if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("stateGatherer-processResponses", () -> stateGatherer.processResponses(this)); if ( ! isRunning()) { return; } if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) { didWork |= resyncLocallyCachedState(); } else { stepDownAsStateGatherer(); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("systemStateBroadcaster-processResponses", systemStateBroadcaster::processResponses); if ( ! isRunning()) { return; } if (isMaster) { didWork |= metricUpdater.forWork("broadcastClusterStateToEligibleNodes", this::broadcastClusterStateToEligibleNodes); systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("processAnyPendingStatusPageRequest", this::processAnyPendingStatusPageRequest); if ( ! isRunning()) { return; } if (rpcServer != null) { didWork |= metricUpdater.forWork("handleRpcRequests", () -> rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this)); } if ( ! isRunning()) { return; } didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask); didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks); didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics); processingCycle = false; ++cycleCount; long tickStopTime = timer.getCurrentTimeInMillis(); if (tickStopTime >= tickStartTime) { metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork); } monitor.wait(didWork || waitingForCycle ? 1 : options.cycleWaitTime); if ( ! 
isRunning()) { return; } tickStartTime = timer.getCurrentTimeInMillis(); processingCycle = true; if (nextOptions != null) { switchToNewConfig(); } } if (isRunning()) { propagateNewStatesToListeners(); } } private boolean updateMasterElectionState() { try { return masterElectionHandler.watchMasterElection(database, databaseContext); } catch (InterruptedException e) { throw new RuntimeException(e); } catch (Exception e) { context.log(logger, Level.WARNING, "Failed to watch master election: " + e); } return false; } private void stepDownAsStateGatherer() { if (isStateGatherer) { cluster.clearStates(); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis())); } isStateGatherer = false; } private void switchToNewConfig() { options = nextOptions; nextOptions = null; try { propagateOptions(); } catch (Exception e) { context.log(logger, Level.SEVERE, "Failed to handle new fleet controller config", e); } } private boolean processAnyPendingStatusPageRequest() { if (statusPageServer != null) { StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest(); if (statusRequest != null) { statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest)); return true; } } return false; } private boolean broadcastClusterStateToEligibleNodes() { if (database.hasPendingClusterStateMetaDataStore()) { context.log(logger, Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores"); return false; } boolean sentAny = false; long currentTime = timer.getCurrentTimeInMillis(); if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported()) && currentTime >= nextStateSendTime) { if (inMasterMoratorium) { context.log(logger, Level.INFO, currentTime < firstAllowedStateBroadcast ? 
"Master moratorium complete: all nodes have reported in" : "Master moratorium complete: timed out waiting for all nodes to report in"); firstAllowedStateBroadcast = currentTime; inMasterMoratorium = false; } sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired( databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf()); if (sentAny) { nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates; } } sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator); return sentAny; } private void propagateNewStatesToListeners() { if ( ! newStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : newStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleNewPublishedState(stateBundle); } } newStates.clear(); } } if ( ! convergedStates.isEmpty()) { synchronized (systemStateListeners) { for (ClusterStateBundle stateBundle : convergedStates) { for (SystemStateListener listener : systemStateListeners) { listener.handleStateConvergedInCluster(stateBundle); } } convergedStates.clear(); } } } private boolean processNextQueuedRemoteTask() { metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size()); RemoteClusterControllerTask task = remoteTasks.poll(); if (task == null) { return false; } final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext(); context.log(logger, Level.FINEST, () -> String.format("Processing remote task of type '%s'", task.getClass().getName())); task.doRemoteFleetControllerTask(taskContext); if (taskMayBeCompletedImmediately(task)) { context.log(logger, Level.FINEST, () -> String.format("Done processing remote task of type '%s'", task.getClass().getName())); task.notifyCompleted(); } else { context.log(logger, Level.FINEST, () -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName())); 
tasksPendingStateRecompute.add(task); } return true; } private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) { return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster); } private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); context.cluster = cluster; context.currentConsolidatedState = consolidatedClusterState(); context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle(); context.masterInfo = new MasterInterface() { @Override public boolean isMaster() { return isMaster; } @Override public Integer getMaster() { return masterElectionHandler.getMaster(); } @Override public boolean inMasterMoratorium() { return inMasterMoratorium; } }; context.nodeListener = this; context.slobrokListener = this; return context; } private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) { return bundle.deferredActivation() ? nodeInfo.getClusterStateVersionActivationAcked() : nodeInfo.getClusterStateVersionBundleAcknowledged(); } private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) { var bundle = systemStateBroadcaster.getClusterStateBundle(); if (bundle == null) { return List.of(); } return cluster.getNodeInfos().stream(). filter(n -> effectiveActivatedStateVersion(n, bundle) < version). map(NodeInfo::getNode). collect(Collectors.toList()); } private static <E> String stringifyListWithLimits(List<E> list, int limit) { if (list.size() > limit) { var sub = list.subList(0, limit); return String.format("%s (... 
and %d more)", sub.stream().map(E::toString).collect(Collectors.joining(", ")), list.size() - limit); } else { return list.stream().map(E::toString).collect(Collectors.joining(", ")); } } private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) { var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion); if (nodes.isEmpty()) { return ""; } return String.format("the following nodes have not converged to at least version %d: %s", taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages)); } private boolean completeSatisfiedVersionDependentTasks() { int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync(); long queueSizeBefore = taskCompletionQueue.size(); final long now = timer.getCurrentTimeInMillis(); while (!taskCompletionQueue.isEmpty()) { VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek(); if (publishedVersion >= taskCompletion.getMinimumVersion()) { context.log(logger, Level.FINE, () -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else if (taskCompletion.getDeadlineTimePointMs() <= now) { var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion()); context.log(logger, Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)", taskCompletion.getTask().getClass().getName(), details)); taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details)); taskCompletion.getTask().notifyCompleted(); taskCompletionQueue.remove(); } else { break; } } return (taskCompletionQueue.size() != queueSizeBefore); } /** * A "consolidated" cluster state 
is guaranteed to have up-to-date information on which nodes are
 * up or down even when the whole cluster is down. The regular, published cluster state is not
 * normally updated to reflect node events when the cluster is down.
 */
    ClusterState consolidatedClusterState() {
        final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
        // If the published state says the cluster is UP it is current enough to use directly.
        if (publishedState.getClusterState() == State.UP) {
            return publishedState;
        }
        // Otherwise, use the latest candidate state (which tracks node events even while the
        // cluster is down), but stamp it with the published version number.
        final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
        current.setVersion(publishedState.getVersion());
        return current;
    }

    /* System test observations:
     - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
     - long time before content node state convergence (though this seems to be the case for legacy impl as well)
     */
    // Refreshes the locally cached cluster view: slobrok lookups, node state requests, timer
    // watching and (if needed) candidate cluster state recomputation. Returns whether any work was done.
    private boolean resyncLocallyCachedState() {
        boolean didWork = false;
        // Retrieve wanted states and start timestamps from zookeeper periodically while not master,
        // so a takeover starts from reasonably fresh data.
        if ( ! isMaster && cycleCount % 100 == 0) {
            didWork = metricUpdater.forWork("loadWantedStates", () -> database.loadWantedStates(databaseContext));
            didWork |= metricUpdater.forWork("loadStartTimestamps", () -> database.loadStartTimestamps(cluster));
        }
        didWork |= metricUpdater.forWork("updateCluster", () -> nodeLookup.updateCluster(cluster, this));
        didWork |= metricUpdater.forWork("sendMessages", () -> stateGatherer.sendMessages(cluster, communicator, this));
        didWork |= metricUpdater.forWork(
                "watchTimers",
                () -> stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this));
        didWork |= metricUpdater.forWork("recomputeClusterStateIfRequired", this::recomputeClusterStateIfRequired);
        if ( ! isStateGatherer) {
            if ( ! isMaster) {
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
                // Update versions. We don't have back all state information, so we put a dummy state of no nodes in old state.
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
                stateChangeHandler.setStateChangedFlag();
            }
        }
        isStateGatherer = true;
        return didWork;
    }

    // Notify all registered listeners about a newly computed candidate state bundle.
    private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
        systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle));
    }

    // True once the initial broadcast moratorium has expired, or all configured nodes have reported in.
    private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
        return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported();
    }

    // Recomputes the candidate cluster state if anything may have changed, and promotes it to a
    // new published version when it differs enough (or a new version was received via ZooKeeper).
    // Returns whether a new state version was published.
    private boolean recomputeClusterStateIfRequired() {
        boolean stateWasChanged = false;
        if (mustRecomputeCandidateClusterState()) {
            stateChangeHandler.unsetStateChangedFlag();
            final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
            final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
                    .bucketSpaces(configuredBucketSpaces)
                    .stateDeriver(createBucketSpaceStateDeriver())
                    .deferredActivation(options.enableTwoPhaseClusterStateActivation)
                    .feedBlock(createResourceExhaustionCalculator()
                               .inferContentClusterFeedBlockOrNull(cluster.getNodeInfos()))
                    .deriveAndBuild();
            stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
            invokeCandidateStateListeners(candidateBundle);

            final long timeNowMs = timer.getCurrentTimeInMillis();
            if (hasPassedFirstStateBroadcastTimePoint(timeNowMs)
                && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
                    || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()))
            {
                final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();

                stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
                emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
                handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
                stateWasChanged = true;
            }
        }
        /*
         * This works transparently for tasks that end up changing the current cluster state (i.e.
         * requiring a new state to be published) and for those whose changes are no-ops (because
         * the changes they request are already part of the current state). In the former case the
         * tasks will depend on the version that was generated based upon them. In the latter case
         * the tasks will depend on the version that is already published (or in the process of
         * being published).
         */
        scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
        return stateWasChanged;
    }

    // Chooses how per-bucket-space states are derived from the baseline state. When the cluster has
    // global document types, nodes with pending global merges are held in maintenance in the default space.
    private ClusterStateDeriver createBucketSpaceStateDeriver() {
        if (options.clusterHasGlobalDocumentTypes) {
            return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(),
                    createDefaultSpaceMaintenanceTransitionConstraint());
        } else {
            return createIdentityClonedBucketSpaceStateDeriver();
        }
    }

    private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
        return new ResourceExhaustionCalculator(
                options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit,
                stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(),
                options.clusterFeedBlockNoiseLevel);
    }

    // Identity deriver: every bucket space state is a plain clone of the baseline.
    private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
        return (state, space) -> state.clone();
    }

    private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
        AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle()
                .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
        return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState());
    }

    /**
     * Move tasks that are dependent on the most recently generated state being published into
     * a completion queue with a dependency on the provided version argument. Once that version
     * has been ACKed by all distributors in the system, those tasks will be marked as completed.
     */
    private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
        final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
        for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
            context.log(logger, Level.INFO, task + " will be completed at version " + completeAtVersion);
            taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
        }
        tasksPendingStateRecompute.clear();
    }

    // Generates the current candidate cluster state from the configured options and observed node states.
    private AnnotatedClusterState computeCurrentAnnotatedState() {
        ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
        params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
              .cluster(cluster)
              .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
        return ClusterStateGenerator.generatedStateFrom(params);
    }

    // Emits events describing the delta between the previously published and the newly published bundle.
    private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState,
                                                final ClusterStateBundle toState,
                                                final long timeNowMs) {
        final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
                EventDiffCalculator.params()
                        .cluster(cluster)
                        .fromState(fromState)
                        .toState(toState)
                        .currentTimeMs(timeNowMs)
                        .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
        for (Event event : deltaEvents) {
            eventLog.add(event, isMaster);
        }

        emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
    }

    // Logs the new cluster state version and, if changed, the altered distribution bit count.
    private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
        eventLog.add(new ClusterEvent(
                ClusterEvent.Type.SYSTEMSTATE,
                "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
                        fromClusterState.getTextualDifference(toClusterState),
                timeNowMs), isMaster);

        if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
            eventLog.add(new ClusterEvent(
                    ClusterEvent.Type.SYSTEMSTATE,
                    "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() +
                            " to " + toClusterState.getDistributionBitCount(),
                    timeNowMs), isMaster);
        }
    }

    // True exactly on the edge where this master may broadcast its first cluster state bundle.
    private boolean atFirstClusterStateSendTimeEdge() {
        // We only care about triggering a state recomputation for the master, which is the only
        // one allowed to actually broadcast any states.
        if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) {
            return false;
        }
        return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
    }

    private boolean mustRecomputeCandidateClusterState() {
        return stateChangeHandler.stateMayHaveChanged()
                || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged()
                || atFirstClusterStateSendTimeEdge();
    }

    // Handles gaining or losing mastership: on the gaining edge, state is reloaded from ZooKeeper
    // and the initial broadcast moratorium is armed; otherwise wanted-state changes are persisted.
    private boolean handleLeadershipEdgeTransitions() {
        boolean didWork = false;
        if (masterElectionHandler.isMaster()) {
            if ( ! isMaster) {
                // Edge: just became master.
                stateChangeHandler.setStateChangedFlag();
                systemStateBroadcaster.resetBroadcastedClusterStateBundle();
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
                ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
                database.loadStartTimestamps(cluster);
                database.loadWantedStates(databaseContext);
                context.log(logger, Level.INFO, () -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
                stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);

                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
                        + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
                long currentTime = timer.getCurrentTimeInMillis();
                firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
                isMaster = true;
                inMasterMoratorium = true;
                context.log(logger, Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be "
                        + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
                didWork = true;
            }
            if (wantedStateChanged) {
                didWork |= database.saveWantedStates(databaseContext);
                wantedStateChanged = false;
            }
        } else {
            dropLeadershipState();
        }
        metricUpdater.updateMasterState(isMaster);
        return didWork;
    }

    // Clears all master-only state; fails tasks whose completion depended on this node being master.
    private void dropLeadershipState() {
        if (isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
            firstAllowedStateBroadcast = Long.MAX_VALUE;
            failAllVersionDependentTasks();
        }
        wantedStateChanged = false;
        isMaster = false;
        inMasterMoratorium = false;
    }

    // Main controller loop: ticks until stopped. Any uncaught throwable is fatal and exits the process.
    @Override
    public void run() {
        controllerThreadId = Thread.currentThread().getId();
        context.log(logger, Level.INFO, "Starting tick loop");
        try {
            processingCycle = true;
            while (isRunning()) {
                tick();
            }
            context.log(logger, Level.INFO, "Tick loop stopped");
        } catch (InterruptedException e) {
            context.log(logger, Level.INFO, "Event thread stopped by interrupt exception: ", e);
        } catch (Throwable t) {
            t.printStackTrace();
            context.log(logger, Level.SEVERE, "Fatal error killed fleet controller", t);
            synchronized (monitor) { running.set(false); }
            System.exit(1);
        } finally {
            prepareShutdownEdge();
        }
    }

    // Marks the controller as stopped, fails deferred tasks and wakes up any waiters.
    private void prepareShutdownEdge() {
        running.set(false);
        failAllVersionDependentTasks();
        synchronized (monitor) { monitor.notifyAll(); }
    }

    // Adapter exposing this controller and its cluster to the database layer.
    public DatabaseHandler.DatabaseContext databaseContext = new DatabaseHandler.DatabaseContext() {
        @Override
        public ContentCluster getCluster() { return cluster; }
        @Override
        public FleetController getFleetController() { return FleetController.this; }
        @Override
        public SlobrokListener getNodeAddedOrRemovedListener() { return FleetController.this; }
        @Override
        public NodeListener getNodeStateUpdateListener() { return FleetController.this; }
    };

    // Blocks until at least one full tick cycle has completed, or throws on timeout/shutdown.
    public void waitForCompleteCycle(Duration timeout) {
        Instant endTime = Instant.now().plus(timeout);
        synchronized (monitor) {
            // If a cycle is currently in progress, it must finish AND one more full cycle must run.
            long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
            waitingForCycle = true;
            try {
                while (cycleCount < wantedCycle) {
                    if (Instant.now().isAfter(endTime))
                        throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeout);
                    if ( !isRunning() )
                        throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                    try { monitor.wait(100); } catch (InterruptedException e) {}
                }
            } finally {
                waitingForCycle = false;
            }
        }
    }

    /**
     * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
     * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
     * live performance to remove a non-problem.
     */
    public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, Duration timeout) throws InterruptedException {
        Instant endTime = Instant.now().plus(timeout);
        synchronized (monitor) {
            while (true) {
                int distCount = 0, storCount = 0;
                for (NodeInfo info : cluster.getNodeInfos()) {
                    if (!info.isRpcAddressOutdated()) {
                        if (info.isDistributor()) ++distCount;
                        else ++storCount;
                    }
                }
                if (distCount == distNodeCount && storCount == storNodeCount) return;

                Duration remainingTime = Duration.between(Instant.now(), endTime);
                if (remainingTime.isNegative()) {
                    throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount
                            + " storage nodes registered in slobrok within timeout of " + timeout + ". (Got " + distCount
                            + " distributors and " + storCount + " storage nodes)");
                }
                monitor.wait(10);
            }
        }
    }

    public boolean hasZookeeperConnection() { return !database.isClosed(); }

    // Used in unit tests to verify that status pages are not updated when no state has changed.
    public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }

    public ContentCluster getCluster() { return cluster; }

    public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }

    public EventLog getEventLog() { return eventLog; }

}
/**
 * The fleet controller: elects a master among the configured controllers, gathers node states,
 * computes and publishes versioned cluster states, and serves status/RPC requests.
 * All mutable state below is guarded by {@code monitor} and/or confined to the controller thread.
 */
class FleetController implements NodeListener, SlobrokListener, SystemStateListener,
                                 Runnable, RemoteClusterControllerTaskScheduler {

    private static final Logger logger = Logger.getLogger(FleetController.class.getName());

    private final FleetControllerContext context;
    private final Timer timer;
    // Shared lock/condition object for the tick loop and external callers (same object as timer).
    private final Object monitor;
    private final EventLog eventLog;
    private final NodeLookup nodeLookup;
    private final ContentCluster cluster;
    private final Communicator communicator;
    private final NodeStateGatherer stateGatherer;
    private final StateChangeHandler stateChangeHandler;
    private final SystemStateBroadcaster systemStateBroadcaster;
    private final StateVersionTracker stateVersionTracker;
    private final StatusPageServerInterface statusPageServer;
    private final RpcServer rpcServer;
    private final DatabaseHandler database;
    private final MasterElectionHandler masterElectionHandler;
    private Thread runner = null;
    private final AtomicBoolean running = new AtomicBoolean(true);
    // Active options; nextOptions holds a pending reconfiguration applied at the next tick.
    private FleetControllerOptions options;
    private FleetControllerOptions nextOptions;
    private final List<SystemStateListener> systemStateListeners = new CopyOnWriteArrayList<>();
    private boolean processingCycle = false;
    private boolean wantedStateChanged = false;
    private long cycleCount = 0;
    private long lastMetricUpdateCycleCount = 0;
    private long nextStateSendTime = 0;
    private Long controllerThreadId = null;

    private boolean waitingForCycle = false;
    private final StatusPageServer.PatternRequestRouter statusRequestRouter = new StatusPageServer.PatternRequestRouter();
    // States published / converged since last propagation to listeners.
    private final List<ClusterStateBundle> newStates = new ArrayList<>();
    private final List<ClusterStateBundle> convergedStates = new ArrayList<>();
    private final Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
    private final MetricUpdater metricUpdater;

    private boolean isMaster = false;
    private boolean inMasterMoratorium = false;
    private boolean isStateGatherer = false;
    // Earliest time (ms) a newly elected master may broadcast its first state; MAX_VALUE while not master.
    private long firstAllowedStateBroadcast = Long.MAX_VALUE;
    private long tickStartTime = Long.MAX_VALUE;

    // Tasks waiting for the next state recomputation before being queued for version-dependent completion.
    private final List<RemoteClusterControllerTask> tasksPendingStateRecompute = new ArrayList<>();
    // Invariant: tasks are always ordered ascending by their cluster state version dependency.
    private final Queue<VersionDependentTaskCompletion> taskCompletionQueue = new ArrayDeque<>();

    private Set<String> configuredBucketSpaces = Collections.emptySet();

    private final RunDataExtractor dataExtractor = new RunDataExtractor() {
        @Override
        public FleetControllerOptions getOptions() { return options; }
        @Override
        public ContentCluster getCluster() { return cluster; }
    };

    public FleetController(FleetControllerContext context,
                           Timer timer,
                           EventLog eventLog,
                           ContentCluster cluster,
                           NodeStateGatherer nodeStateGatherer,
                           Communicator communicator,
                           StatusPageServerInterface statusPage,
                           RpcServer server,
                           NodeLookup nodeLookup,
                           DatabaseHandler database,
                           StateChangeHandler stateChangeHandler,
                           SystemStateBroadcaster systemStateBroadcaster,
                           MasterElectionHandler masterElectionHandler,
                           MetricUpdater metricUpdater,
                           FleetControllerOptions options) {
        context.log(logger, Level.INFO, "Created");
        this.context = context;
        this.timer = timer;
        this.monitor = timer;
        this.eventLog = eventLog;
        this.options = options;
        this.nodeLookup = nodeLookup;
        this.cluster = cluster;
        this.communicator = communicator;
        this.database = database;
        this.stateGatherer = nodeStateGatherer;
        this.stateChangeHandler = stateChangeHandler;
        this.systemStateBroadcaster = systemStateBroadcaster;
        this.stateVersionTracker = new StateVersionTracker(options.minMergeCompletionRatio);
        this.metricUpdater = metricUpdater;
        this.statusPageServer = statusPage;
        this.rpcServer = server;
        this.masterElectionHandler = masterElectionHandler;
        this.statusRequestRouter.addHandler(
                "^/node=([a-z]+)\\.(\\d+)$",
                new LegacyNodePageRequestHandler(timer, eventLog, cluster));
        this.statusRequestRouter.addHandler(
                "^/state.*",
                new NodeHealthRequestHandler());
        this.statusRequestRouter.addHandler(
                "^/clusterstate",
                new ClusterStateRequestHandler(stateVersionTracker));
        this.statusRequestRouter.addHandler(
                "^/$",
                new LegacyIndexPageRequestHandler(
                    timer, cluster, masterElectionHandler,
                    stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor));

        propagateOptions();
    }

    // Factory wiring up all real collaborators (RPC, slobrok, ZooKeeper) and starting the controller thread.
    public static FleetController create(FleetControllerOptions options,
                                         StatusPageServerInterface statusPageServer,
                                         MetricReporter metricReporter) throws Exception {
        var context = new FleetControllerContextImpl(options);
        var timer = new RealTimer();
        var metricUpdater = new MetricUpdater(metricReporter, options.fleetControllerIndex, options.clusterName);
        var log = new EventLog(timer, metricUpdater);
        var cluster = new ContentCluster(
                options.clusterName,
                options.nodes,
                options.storageDistribution);
        var stateGatherer = new NodeStateGatherer(timer, timer, log);
        var communicator = new RPCCommunicator(
                RPCCommunicator.createRealSupervisor(),
                timer,
                options.fleetControllerIndex,
                options.nodeStateRequestTimeoutMS,
                options.nodeStateRequestTimeoutEarliestPercentage,
                options.nodeStateRequestTimeoutLatestPercentage,
                options.nodeStateRequestRoundTripTimeMaxSeconds);
        var database = new DatabaseHandler(context, new ZooKeeperDatabaseFactory(context), timer, options.zooKeeperServerAddress, timer);
        var lookUp = new SlobrokClient(context, timer);
        var stateGenerator = new StateChangeHandler(context, timer, log);
        var stateBroadcaster = new SystemStateBroadcaster(context, timer, timer);
        var masterElectionHandler = new MasterElectionHandler(context, options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
        var controller = new FleetController(context, timer, log, cluster, stateGatherer, communicator, statusPageServer, null, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
        controller.start();
        return controller;
    }

    public void start() {
        runner = new Thread(this);
        runner.start();
    }

    public Object getMonitor() { return monitor; }

    public boolean isRunning() {
        return running.get();
    }

    public boolean isMaster() {
        synchronized (monitor) {
            return isMaster;
        }
    }

    public ClusterState getClusterState() {
        synchronized (monitor) {
            return systemStateBroadcaster.getClusterState();
        }
    }

    public ClusterStateBundle getClusterStateBundle() {
        synchronized (monitor) {
            return systemStateBroadcaster.getClusterStateBundle();
        }
    }

    // Queues a task; it is picked up and executed on the controller thread in a later tick.
    public void schedule(RemoteClusterControllerTask task) {
        synchronized (monitor) {
            context.log(logger, Level.FINE, "Scheduled remote task " + task.getClass().getName() + " for execution");
            remoteTasks.add(task);
        }
    }

    /** Used for unit testing. */
    public void addSystemStateListener(SystemStateListener listener) {
        systemStateListeners.add(listener);
        // Replay the current published (and, if any, converged) state to the new listener.
        com.yahoo.vdslib.state.ClusterState state = getSystemState();
        if (state == null) {
            throw new NullPointerException("Cluster state should never be null at this point");
        }
        listener.handleNewPublishedState(ClusterStateBundle.ofBaselineOnly(AnnotatedClusterState.withoutAnnotations(state)));
        ClusterStateBundle convergedState = systemStateBroadcaster.getLastClusterStateBundleConverged();
        if (convergedState != null) {
            listener.handleStateConvergedInCluster(convergedState);
        }
    }

    public FleetControllerOptions getOptions() {
        synchronized(monitor) {
            return options.clone();
        }
    }

    public NodeState getReportedNodeState(Node n) {
        synchronized(monitor) {
            NodeInfo node = cluster.getNodeInfo(n);
            if (node == null) {
                throw new IllegalStateException("Did not find node " + n + " in cluster " + cluster);
            }
            return node.getReportedState();
        }
    }

    public NodeState getWantedNodeState(Node n) {
        synchronized(monitor) {
            return cluster.getNodeInfo(n).getWantedState();
        }
    }

    public com.yahoo.vdslib.state.ClusterState getSystemState() {
        synchronized(monitor) {
            return stateVersionTracker.getVersionedClusterState();
        }
    }

    public int getRpcPort() { return rpcServer.getPort(); }

    // Stops the controller thread, then shuts down database, servers and communication channels.
    public void shutdown() throws InterruptedException, java.io.IOException {
        if (runner != null && isRunning()) {
            context.log(logger, Level.INFO, "Joining event thread.");
            running.set(false);
            synchronized(monitor) { monitor.notifyAll(); }
            runner.join();
        }
        context.log(logger, Level.INFO, "FleetController done shutting down event thread.");
        controllerThreadId = Thread.currentThread().getId();
        database.shutdown(databaseContext);

        if (statusPageServer != null) {
            statusPageServer.shutdown();
        }
        if (rpcServer != null) {
            rpcServer.shutdown();
        }
        communicator.shutdown();
        nodeLookup.shutdown();
    }

    // Stages new options; they take effect on the controller thread at the start of the next tick.
    public void updateOptions(FleetControllerOptions options) {
        var newId = FleetControllerId.fromOptions(options);
        synchronized(monitor) {
            assert newId.equals(context.id());
            context.log(logger, Level.INFO, "FleetController has new options");
            nextOptions = options.clone();
            monitor.notifyAll();
        }
    }

    private void verifyInControllerThread() {
        if (controllerThreadId != null && controllerThreadId != Thread.currentThread().getId()) {
            throw new IllegalStateException("Function called from non-controller thread. Shouldn't happen.");
        }
    }

    private ClusterState latestCandidateClusterState() {
        return stateVersionTracker.getLatestCandidateState().getClusterState();
    }

    @Override
    public void handleNewNodeState(NodeInfo node, NodeState newState) {
        verifyInControllerThread();
        stateChangeHandler.handleNewReportedNodeState(latestCandidateClusterState(), node, newState, this);
    }

    @Override
    public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
        verifyInControllerThread();
        wantedStateChanged = true;
        stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
    }

    @Override
    public void handleRemovedNode(Node node) {
        verifyInControllerThread();
        // Prune orphaned wanted states
        wantedStateChanged = true;
    }

    @Override
    public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
        verifyInControllerThread();
        triggerBundleRecomputationIfResourceExhaustionStateChanged(nodeInfo, newHostInfo);
        stateVersionTracker.handleUpdatedHostInfo(nodeInfo, newHostInfo);
    }

    // Forces a state recomputation when a node's resource exhaustion set changed (feed block edge).
    private void triggerBundleRecomputationIfResourceExhaustionStateChanged(NodeInfo nodeInfo, HostInfo newHostInfo) {
        if (!options.clusterFeedBlockEnabled) {
            return;
        }
        var calc = createResourceExhaustionCalculator();
        // Same node may have the same resource exhaustion reported in multiple host infos; compare sets.
        var previouslyExhausted = calc.enumerateNodeResourceExhaustions(nodeInfo);
        var nowExhausted = calc.resourceExhaustionsFromHostInfo(nodeInfo, newHostInfo);
        if (!previouslyExhausted.equals(nowExhausted)) {
            context.log(logger, Level.FINE, () -> String.format("Triggering state recomputation due to change in cluster feed block: %s -> %s",
                                                                previouslyExhausted, nowExhausted));
            stateChangeHandler.setStateChangedFlag();
        }
    }

    @Override
    public void handleNewNode(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleNewNode(node);
    }

    @Override
    public void handleMissingNode(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
    }

    @Override
    public void handleNewRpcAddress(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleNewRpcAddress(node);
    }

    @Override
    public void handleReturnedRpcAddress(NodeInfo node) {
        verifyInControllerThread();
        stateChangeHandler.handleReturnedRpcAddress(node);
    }

    // Records a newly published state, updates metrics, and (if master) persists it to ZooKeeper.
    @Override
    public void handleNewPublishedState(ClusterStateBundle stateBundle) {
        verifyInControllerThread();
        ClusterState baselineState = stateBundle.getBaselineClusterState();
        newStates.add(stateBundle);
        metricUpdater.updateClusterStateMetrics(cluster, baselineState,
                ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
        lastMetricUpdateCycleCount = cycleCount;
        systemStateBroadcaster.handleNewClusterStates(stateBundle);
        // Iff master, always store new version in ZooKeeper *before* publishing to any
        // nodes so that a cluster controller crash after publishing but before a successful
        // store will not risk reusing the same version number.
        if (isMaster) {
            storeClusterStateMetaDataToZooKeeper(stateBundle);
        }
    }

    // Re-publishes cluster state metrics if no state change has refreshed them for ~300 cycles.
    private boolean maybePublishOldMetrics() {
        verifyInControllerThread();
        if (isMaster() && cycleCount > 300 + lastMetricUpdateCycleCount) {
            ClusterStateBundle stateBundle = stateVersionTracker.getVersionedClusterStateBundle();
            ClusterState baselineState = stateBundle.getBaselineClusterState();
            metricUpdater.updateClusterStateMetrics(cluster, baselineState,
                    ResourceUsageStats.calculateFrom(cluster.getNodeInfos(), options.clusterFeedBlockLimit, stateBundle.getFeedBlock()));
            lastMetricUpdateCycleCount = cycleCount;
            return true;
        } else {
            return false;
        }
    }

    private void storeClusterStateMetaDataToZooKeeper(ClusterStateBundle stateBundle) {
        try {
            database.saveLatestSystemStateVersion(databaseContext, stateBundle.getVersion());
            database.saveLatestClusterStateBundle(databaseContext, stateBundle);
        } catch (InterruptedException e) {
            // Rethrow as RuntimeException to propagate exception up to tick() main loop where
            // interruptedness is handled explicitly.
            throw new RuntimeException("ZooKeeper write interrupted", e);
        }
    }

    /**
     * This function gives data of the current state in master election.
     * The keys in the given map are indices of fleet controllers.
     * The values are what fleetcontroller that fleetcontroller wants to
     * become master.
     *
     * If more than half the fleetcontrollers want a node to be master and
     * that node also wants itself as master, that node is the single master.
     * If this condition is not met, there is currently no master.
     */
    public void handleFleetData(Map<Integer, Integer> data) {
        verifyInControllerThread();
        context.log(logger, Level.FINEST, "Sending fleet data event on to master election handler");
        metricUpdater.updateMasterElectionMetrics(data);
        masterElectionHandler.handleFleetData(data);
    }

    /**
     * Called when we can no longer contact database.
     */
    public void lostDatabaseConnection() {
        verifyInControllerThread();
        boolean wasMaster = isMaster;
        masterElectionHandler.lostDatabaseConnection();
        if (wasMaster) {
            // Enforce that we re-fetch all state information from ZooKeeper upon the next tick if
            // we regain the database connection.
            dropLeadershipState();
            metricUpdater.updateMasterState(false);
        }
    }

    // Fails every task waiting on a state recomputation or version ACK with LEADERSHIP_LOST.
    private void failAllVersionDependentTasks() {
        tasksPendingStateRecompute.forEach(task -> {
            task.handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
            task.notifyCompleted();
        });
        tasksPendingStateRecompute.clear();
        taskCompletionQueue.forEach(task -> {
            task.getTask().handleFailure(RemoteClusterControllerTask.Failure.of(
                    RemoteClusterControllerTask.FailureCondition.LEADERSHIP_LOST));
            task.getTask().notifyCompleted();
        });
        taskCompletionQueue.clear();
    }

    /** Called when all distributors have acked newest cluster state version. */
    public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.DatabaseContext dbContext) throws InterruptedException {
        Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
        var currentBundle = stateVersionTracker.getVersionedClusterStateBundle();
        context.log(logger, Level.FINE, () -> String.format("All distributors have ACKed cluster state version %d", currentBundle.getVersion()));
        stateChangeHandler.handleAllDistributorsInSync(currentBundle.getBaselineClusterState(), nodes, database, dbContext);
        convergedStates.add(currentBundle);
    }

    // True when the configured node set differs from the cluster's current node set (count,
    // membership, or retired flags).
    private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
        if (newNodes.size() != cluster.getConfiguredNodes().size()) return true;
        if (! cluster.getConfiguredNodes().values().containsAll(newNodes)) return true;

        // Check retired flag
        for (ConfiguredNode node : newNodes) {
            if (node.retired() != cluster.getConfiguredNodes().get(node.index()).retired()) {
                return true;
            }
        }
        return false;
    }

    /** This is called when the options field has been set to a new set of options */
    private void propagateOptions() {
        verifyInControllerThread();
        selfTerminateIfConfiguredNodeIndexHasChanged();

        if (changesConfiguredNodeSet(options.nodes)) {
            // Force slobrok node re-fetch in case of changes to the set of configured nodes
            cluster.setSlobrokGenerationCount(0);
        }

        configuredBucketSpaces = Set.of(FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace());
        stateVersionTracker.setMinMergeCompletionRatio(options.minMergeCompletionRatio);

        communicator.propagateOptions(options);

        if (nodeLookup instanceof SlobrokClient) {
            ((SlobrokClient) nodeLookup).setSlobrokConnectionSpecs(options.slobrokConnectionSpecs);
        }
        eventLog.setMaxSize(options.eventLogMaxSize, options.eventNodeLogMaxSize);
        cluster.setPollingFrequency(options.statePollingFrequency);
        cluster.setDistribution(options.storageDistribution);
        cluster.setNodes(options.nodes, databaseContext.getNodeStateUpdateListener());
        database.setZooKeeperAddress(options.zooKeeperServerAddress, databaseContext);
        database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout, databaseContext);
        stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
        stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);

        stateChangeHandler.reconfigureFromOptions(options);
        stateChangeHandler.setStateChangedFlag(); // Always trigger state recomputation after reconfig

        masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
        masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
        masterElectionHandler.setUsingZooKeeper(options.zooKeeperServerAddress != null && !options.zooKeeperServerAddress.isEmpty());

        if (rpcServer != null) {
            rpcServer.setMasterElectionHandler(masterElectionHandler);
            try {
                rpcServer.setSlobrokConnectionSpecs(options.slobrokConnectionSpecs, options.rpcPort);
            } catch (ListenFailedException e) {
                context.log(logger, Level.WARNING, "Failed to bind RPC server to port " + options.rpcPort + ". This may be natural if cluster has altered the services running on this node: " + e.getMessage());
            } catch (Exception e) {
                context.log(logger, Level.WARNING, "Failed to initialize RPC server socket: " + e.getMessage());
            }
        }

        if (statusPageServer != null) {
            try {
                statusPageServer.setPort(options.httpPort);
            } catch (Exception e) {
                context.log(logger, Level.WARNING, "Failed to initialize status server socket. This may be natural if cluster has altered the services running on this node: " + e.getMessage());
            }
        }

        long currentTime = timer.getCurrentTimeInMillis();
        nextStateSendTime = Math.min(currentTime + options.minTimeBetweenNewSystemStates, nextStateSendTime);
    }

    // Live change of cluster name or controller index is unsupported: exit so new config takes effect.
    private void selfTerminateIfConfiguredNodeIndexHasChanged() {
        var newId = new FleetControllerId(options.clusterName, options.fleetControllerIndex);
        if (!newId.equals(context.id())) {
            context.log(logger, Level.WARNING, context.id() + " got new configuration for " + newId + ". We do not support doing this live; " +
                                               "immediately exiting now to force new configuration");
            prepareShutdownEdge();
            System.exit(1);
        }
    }

    // Routes a status page HTTP request to a registered handler; renders an error page on failure.
    public StatusPageResponse fetchStatusPage(StatusPageServer.HttpRequest httpRequest) {
        verifyInControllerThread();
        StatusPageResponse.ResponseCode responseCode;
        String message;
        final String hiddenMessage;
        try {
            StatusPageServer.RequestHandler handler = statusRequestRouter.resolveHandler(httpRequest);
            if (handler == null) {
                throw new FileNotFoundException("No handler found for request: " + httpRequest.getPath());
            }
            return handler.handle(httpRequest);
        } catch (FileNotFoundException e) {
            responseCode = StatusPageResponse.ResponseCode.NOT_FOUND;
            message = e.getMessage();
            hiddenMessage = "";
        } catch (Exception e) {
            responseCode = StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR;
            message = "Internal Server Error";
            hiddenMessage = ExceptionUtils.getStackTraceAsString(e);
            context.log(logger, Level.FINE, () -> "Unknown exception thrown for request " + httpRequest.getRequest() +
                                                  ": " + hiddenMessage);
        }

        TimeZone tz = TimeZone.getTimeZone("UTC");
        long currentTime = timer.getCurrentTimeInMillis();
        StatusPageResponse response = new StatusPageResponse();
        StringBuilder content = new StringBuilder();
        response.setContentType("text/html");
        response.setResponseCode(responseCode);
        content.append("<!-- Answer to request " + httpRequest.getRequest() + " -->\n");
        content.append("<p>UTC time when creating this page: ").append(RealTimer.printDateNoMilliSeconds(currentTime, tz)).append("</p>");
        response.writeHtmlHeader(content, message);
        response.writeHtmlFooter(content, hiddenMessage);
        response.writeContent(content.toString());

        return response;
    }

    // One iteration of the controller loop: ZooKeeper work, leadership edges, state gathering,
    // broadcasting, status/RPC/task processing, then waiting on the monitor until the next cycle.
    // Aborts early between phases if the controller has been asked to stop.
    public void tick() throws Exception {
        synchronized (monitor) {
            boolean didWork;
            didWork = metricUpdater.forWork("doNextZooKeeperTask", () -> database.doNextZooKeeperTask(databaseContext));
            didWork |= metricUpdater.forWork("updateMasterElectionState", this::updateMasterElectionState);
            didWork |= metricUpdater.forWork("handleLeadershipEdgeTransitions", this::handleLeadershipEdgeTransitions);
            stateChangeHandler.setMaster(isMaster);

            if ( ! isRunning()) { return; }
            // Process zero or more getNodeState responses that we have received.
            didWork |= metricUpdater.forWork("stateGatherer-processResponses", () -> stateGatherer.processResponses(this));

            if ( ! isRunning()) { return; }

            if (masterElectionHandler.isAmongNthFirst(options.stateGatherCount)) {
                didWork |= resyncLocallyCachedState();
            } else {
                stepDownAsStateGatherer();
            }

            if ( ! isRunning()) { return; }
            didWork |= metricUpdater.forWork("systemStateBroadcaster-processResponses", systemStateBroadcaster::processResponses);
            if ( ! isRunning()) { return; }

            if (isMaster) {
                didWork |= metricUpdater.forWork("broadcastClusterStateToEligibleNodes", this::broadcastClusterStateToEligibleNodes);
                systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
            }

            if ( ! isRunning()) { return; }
            didWork |= metricUpdater.forWork("processAnyPendingStatusPageRequest", this::processAnyPendingStatusPageRequest);
            if ( ! isRunning()) { return; }
            if (rpcServer != null) {
                didWork |= metricUpdater.forWork("handleRpcRequests", () -> rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this));
            }

            if ( ! isRunning()) { return; }
            didWork |= metricUpdater.forWork("processNextQueuedRemoteTask", this::processNextQueuedRemoteTask);
            didWork |= metricUpdater.forWork("completeSatisfiedVersionDependentTasks", this::completeSatisfiedVersionDependentTasks);
            didWork |= metricUpdater.forWork("maybePublishOldMetrics", this::maybePublishOldMetrics);

            processingCycle = false;
            ++cycleCount;
            long tickStopTime = timer.getCurrentTimeInMillis();
            if (tickStopTime >= tickStartTime) {
                metricUpdater.addTickTime(tickStopTime - tickStartTime, didWork);
            }
            // Sleep only a token amount if work was done (or someone is waiting for a cycle).
            monitor.wait(didWork || waitingForCycle ? 1 : options.cycleWaitTime);
            if ( ! isRunning()) { return; }
            tickStartTime = timer.getCurrentTimeInMillis();
            processingCycle = true;
            if (nextOptions != null) {
                switchToNewConfig();
            }
        }
        if (isRunning()) {
            propagateNewStatesToListeners();
        }
    }

    private boolean updateMasterElectionState() {
        try {
            return masterElectionHandler.watchMasterElection(database, databaseContext);
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        } catch (Exception e) {
            context.log(logger, Level.WARNING, "Failed to watch master election: " + e);
        }
        return false;
    }

    private void stepDownAsStateGatherer() {
        if (isStateGatherer) {
            // Remove old state and let all state get resent
            cluster.clearStates();
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer a node state gatherer.", timer.getCurrentTimeInMillis()));
        }
        isStateGatherer = false;
    }

    // Applies options staged by updateOptions(); runs on the controller thread.
    private void switchToNewConfig() {
        options = nextOptions;
        nextOptions = null;
        try {
            propagateOptions();
        } catch (Exception e) {
            context.log(logger, Level.SEVERE, "Failed to handle new fleet controller config", e);
        }
    }

    private boolean processAnyPendingStatusPageRequest() {
        if (statusPageServer != null) {
            StatusPageServer.HttpRequest statusRequest = statusPageServer.getCurrentHttpRequest();
            if (statusRequest != null) {
                statusPageServer.answerCurrentStatusRequest(fetchStatusPage(statusRequest));
                return true;
            }
        }
        return false;
    }

    // Broadcasts the newest state bundle (and any pending activations) once the moratorium has
    // expired and the minimum inter-broadcast interval has passed.
    private boolean broadcastClusterStateToEligibleNodes() {
        // If there's a pending DB store we have not yet been able to store the
        // current state bundle to ZK and must therefore _not_ push the new state
        // onto the nodes. Wait until we've successfully stored the bundle.
        if (database.hasPendingClusterStateMetaDataStore()) {
            context.log(logger, Level.FINE, "Can't publish current cluster state as it has one or more pending ZooKeeper stores");
            return false;
        }
        boolean sentAny = false;
        // Give nodes a fair chance to respond first time to state gathering requests, so we don't
        // disturb system when we take over. Allow anyways if we have states from all nodes.
        long currentTime = timer.getCurrentTimeInMillis();
        if ((currentTime >= firstAllowedStateBroadcast || cluster.allStatesReported())
            && currentTime >= nextStateSendTime)
        {
            if (inMasterMoratorium) {
                context.log(logger, Level.INFO, currentTime < firstAllowedStateBroadcast ?
                            "Master moratorium complete: all nodes have reported in" :
                            "Master moratorium complete: timed out waiting for all nodes to report in");
                // Reset firstAllowedStateBroadcast to make sure all future times are after firstAllowedStateBroadcast
                firstAllowedStateBroadcast = currentTime;
                inMasterMoratorium = false;
            }
            sentAny = systemStateBroadcaster.broadcastNewStateBundleIfRequired(
                    databaseContext, communicator, database.getLastKnownStateBundleVersionWrittenBySelf());
            if (sentAny) {
                nextStateSendTime = currentTime + options.minTimeBetweenNewSystemStates;
            }
        }
        // Always allow activations if we've already broadcasted a state bundle
        sentAny |= systemStateBroadcaster.broadcastStateActivationsIfRequired(databaseContext, communicator);
        return sentAny;
    }

    // Delivers published and converged state bundles, accumulated during the tick, to listeners.
    private void propagateNewStatesToListeners() {
        if ( ! newStates.isEmpty()) {
            synchronized (systemStateListeners) {
                for (ClusterStateBundle stateBundle : newStates) {
                    for (SystemStateListener listener : systemStateListeners) {
                        listener.handleNewPublishedState(stateBundle);
                    }
                }
                newStates.clear();
            }
        }
        if ( ! convergedStates.isEmpty()) {
            synchronized (systemStateListeners) {
                for (ClusterStateBundle stateBundle : convergedStates) {
                    for (SystemStateListener listener : systemStateListeners) {
                        listener.handleStateConvergedInCluster(stateBundle);
                    }
                }
                convergedStates.clear();
            }
        }
    }

    // Executes one queued remote task; tasks with a version ACK dependency are parked until the
    // next state recomputation instead of being completed immediately.
    private boolean processNextQueuedRemoteTask() {
        metricUpdater.updateRemoteTaskQueueSize(remoteTasks.size());
        RemoteClusterControllerTask task = remoteTasks.poll();
        if (task == null) {
            return false;
        }
        final RemoteClusterControllerTask.Context taskContext = createRemoteTaskProcessingContext();
        context.log(logger, Level.FINEST, () -> String.format("Processing remote task of type '%s'", task.getClass().getName()));
        task.doRemoteFleetControllerTask(taskContext);
        if (taskMayBeCompletedImmediately(task)) {
            context.log(logger, Level.FINEST, () -> String.format("Done processing remote task of type '%s'", task.getClass().getName()));
            task.notifyCompleted();
        } else {
            context.log(logger, Level.FINEST, () -> String.format("Remote task of type '%s' queued until state recomputation", task.getClass().getName()));
tasksPendingStateRecompute.add(task);
        }
        return true;
    }

    // A task may complete right away unless it must wait for a cluster state version to be ACKed
    // by the cluster — and only the master can meaningfully wait for that.
    private boolean taskMayBeCompletedImmediately(RemoteClusterControllerTask task) {
        return (!task.hasVersionAckDependency() || task.isFailed() || !isMaster);
    }

    // Builds the context handed to remote tasks: a snapshot of cluster/state plus callbacks.
    private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
        final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
        context.cluster = cluster;
        context.currentConsolidatedState = consolidatedClusterState();
        context.publishedClusterStateBundle = stateVersionTracker.getVersionedClusterStateBundle();
        context.masterInfo = new MasterInterface() {
            @Override public boolean isMaster() { return isMaster; }
            @Override public Integer getMaster() { return masterElectionHandler.getMaster(); }
            @Override public boolean inMasterMoratorium() { return inMasterMoratorium; }
        };
        context.nodeListener = this;
        context.slobrokListener = this;
        return context;
    }

    // Which version counts as "acked" depends on whether two-phase (deferred) activation is used.
    private static long effectiveActivatedStateVersion(NodeInfo nodeInfo, ClusterStateBundle bundle) {
        return bundle.deferredActivation() ? nodeInfo.getClusterStateVersionActivationAcked() : nodeInfo.getClusterStateVersionBundleAcknowledged();
    }

    // Lists the nodes lagging behind the given cluster state version (empty if nothing broadcast yet).
    private List<Node> enumerateNodesNotYetAckedAtLeastVersion(long version) {
        var bundle = systemStateBroadcaster.getClusterStateBundle();
        if (bundle == null) {
            return List.of();
        }
        return cluster.getNodeInfos().stream().
                filter(n -> effectiveActivatedStateVersion(n, bundle) < version).
                map(NodeInfo::getNode).
                collect(Collectors.toList());
    }

    // Joins list elements with ", ", truncating to 'limit' entries with an "and N more" suffix.
    private static <E> String stringifyListWithLimits(List<E> list, int limit) {
        if (list.size() > limit) {
            var sub = list.subList(0, limit);
            return String.format("%s (... and %d more)", sub.stream().map(E::toString).collect(Collectors.joining(", ")), list.size() - limit);
        } else {
            return list.stream().map(E::toString).collect(Collectors.joining(", "));
        }
    }

    // Human-readable diagnostics for deadline-failed tasks; empty string if everyone converged.
    private String buildNodesNotYetConvergedMessage(long taskConvergeVersion) {
        var nodes = enumerateNodesNotYetAckedAtLeastVersion(taskConvergeVersion);
        if (nodes.isEmpty()) {
            return "";
        }
        return String.format("the following nodes have not converged to at least version %d: %s", taskConvergeVersion, stringifyListWithLimits(nodes, options.maxDivergentNodesPrintedInTaskErrorMessages));
    }

    /**
     * Completes (or deadline-fails) queued version-dependent tasks whose required cluster state
     * version has been acknowledged cluster-wide. The queue is ordered, so iteration stops at the
     * first task that is neither satisfied nor expired.
     * @return true if any task was removed from the queue.
     */
    private boolean completeSatisfiedVersionDependentTasks() {
        int publishedVersion = systemStateBroadcaster.lastClusterStateVersionInSync();
        long queueSizeBefore = taskCompletionQueue.size();
        final long now = timer.getCurrentTimeInMillis();
        while (!taskCompletionQueue.isEmpty()) {
            VersionDependentTaskCompletion taskCompletion = taskCompletionQueue.peek();
            if (publishedVersion >= taskCompletion.getMinimumVersion()) {
                context.log(logger, Level.FINE, () -> String.format("Deferred task of type '%s' has minimum version %d, published is %d; completing", taskCompletion.getTask().getClass().getName(), taskCompletion.getMinimumVersion(), publishedVersion));
                taskCompletion.getTask().notifyCompleted();
                taskCompletionQueue.remove();
            } else if (taskCompletion.getDeadlineTimePointMs() <= now) {
                var details = buildNodesNotYetConvergedMessage(taskCompletion.getMinimumVersion());
                context.log(logger, Level.WARNING, () -> String.format("Deferred task of type '%s' has exceeded wait deadline; completing with failure (details: %s)", taskCompletion.getTask().getClass().getName(), details));
                taskCompletion.getTask().handleFailure(RemoteClusterControllerTask.Failure.of( RemoteClusterControllerTask.FailureCondition.DEADLINE_EXCEEDED, details));
                taskCompletion.getTask().notifyCompleted();
                taskCompletionQueue.remove();
            } else {
                break;
            }
        }
        return (taskCompletionQueue.size() != queueSizeBefore);
    }

    /**
     * A "consolidated" cluster state
is guaranteed to have up-to-date information on which nodes are
     * up or down even when the whole cluster is down. The regular, published cluster state is not
     * normally updated to reflect node events when the cluster is down.
     */
    ClusterState consolidatedClusterState() {
        final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
        if (publishedState.getClusterState() == State.UP) {
            return publishedState;
        }
        // Cluster not UP: use the fresher candidate state, but keep the published version number.
        final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
        current.setVersion(publishedState.getVersion());
        return current;
    }

    /*
     System test observations:
     - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling
     - long time before content node state convergence (though this seems to be the case for legacy impl as well)
    */
    /**
     * Refreshes locally cached cluster/node state: wanted states and start timestamps (periodically,
     * for non-masters), Slobrok lookups, node state polling, timer watching and — if needed —
     * cluster state recomputation.
     * @return true if any sub-step reported work done.
     */
    private boolean resyncLocallyCachedState() {
        boolean didWork = false;
        // Nodes that are not master nor in the moratorium force-refresh ZooKeeper-backed data
        // only every 100th cycle to limit load.
        if ( ! isMaster && cycleCount % 100 == 0) {
            didWork = metricUpdater.forWork("loadWantedStates", () -> database.loadWantedStates(databaseContext));
            didWork |= metricUpdater.forWork("loadStartTimestamps", () -> database.loadStartTimestamps(cluster));
        }
        didWork |= metricUpdater.forWork("updateCluster", () -> nodeLookup.updateCluster(cluster, this));
        didWork |= metricUpdater.forWork("sendMessages", () -> stateGatherer.sendMessages(cluster, communicator, this));
        didWork |= metricUpdater.forWork( "watchTimers", () -> stateChangeHandler.watchTimers(cluster, stateVersionTracker.getLatestCandidateState().getClusterState(), this));
        didWork |= metricUpdater.forWork("recomputeClusterStateIfRequired", this::recomputeClusterStateIfRequired);
        // Edge: this node just (re-)entered the state gatherer role.
        if ( ! isStateGatherer) {
            if ( ! isMaster) {
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
                stateChangeHandler.setStateChangedFlag();
            }
        }
        isStateGatherer = true;
        return didWork;
    }

    private void invokeCandidateStateListeners(ClusterStateBundle candidateBundle) {
        systemStateListeners.forEach(listener -> listener.handleNewCandidateState(candidateBundle));
    }

    // True once the initial post-election broadcast hold-off has elapsed or every node has reported.
    private boolean hasPassedFirstStateBroadcastTimePoint(long timeNowMs) {
        return timeNowMs >= firstAllowedStateBroadcast || cluster.allStatesReported();
    }

    /**
     * Recomputes the candidate cluster state bundle when inputs have changed, and promotes it to
     * a new versioned (published) state if it differs enough from the current one.
     * @return true if a new versioned state was produced.
     */
    private boolean recomputeClusterStateIfRequired() {
        boolean stateWasChanged = false;
        if (mustRecomputeCandidateClusterState()) {
            stateChangeHandler.unsetStateChangedFlag();
            final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
            final ClusterStateBundle candidateBundle = ClusterStateBundle.builder(candidate)
                    .bucketSpaces(configuredBucketSpaces)
                    .stateDeriver(createBucketSpaceStateDeriver())
                    .deferredActivation(options.enableTwoPhaseClusterStateActivation)
                    .feedBlock(createResourceExhaustionCalculator() .inferContentClusterFeedBlockOrNull(cluster.getNodeInfos()))
                    .deriveAndBuild();
            stateVersionTracker.updateLatestCandidateStateBundle(candidateBundle);
            invokeCandidateStateListeners(candidateBundle);
            final long timeNowMs = timer.getCurrentTimeInMillis();
            if (hasPassedFirstStateBroadcastTimePoint(timeNowMs) && (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())) {
                final ClusterStateBundle before = stateVersionTracker.getVersionedClusterStateBundle();
                stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
                emitEventsForAlteredStateEdges(before, stateVersionTracker.getVersionedClusterStateBundle(), timeNowMs);
handleNewPublishedState(stateVersionTracker.getVersionedClusterStateBundle());
                stateWasChanged = true;
            }
        }
        /*
         * This works transparently for tasks that end up changing the current cluster state (i.e.
         * requiring a new state to be published) and for those whose changes are no-ops (because
         * the changes they request are already part of the current state). In the former case the
         * tasks will depend on the version that was generated based upon them. In the latter case
         * the tasks will depend on the version that is already published (or in the process of
         * being published).
         */
        scheduleVersionDependentTasksForFutureCompletion(stateVersionTracker.getCurrentVersion());
        return stateWasChanged;
    }

    // Per-bucket-space derivation: global document types require maintenance-mode handling while
    // global merges are pending; otherwise each space is just a clone of the baseline state.
    private ClusterStateDeriver createBucketSpaceStateDeriver() {
        if (options.clusterHasGlobalDocumentTypes) {
            return new MaintenanceWhenPendingGlobalMerges(stateVersionTracker.createMergePendingChecker(), createDefaultSpaceMaintenanceTransitionConstraint());
        } else {
            return createIdentityClonedBucketSpaceStateDeriver();
        }
    }

    private ResourceExhaustionCalculator createResourceExhaustionCalculator() {
        return new ResourceExhaustionCalculator( options.clusterFeedBlockEnabled, options.clusterFeedBlockLimit, stateVersionTracker.getLatestCandidateStateBundle().getFeedBlockOrNull(), options.clusterFeedBlockNoiseLevel);
    }

    private static ClusterStateDeriver createIdentityClonedBucketSpaceStateDeriver() {
        return (state, space) -> state.clone();
    }

    // Constraint based on the previously published default-space state; only allows maintenance
    // transitions on the "up" edge as implemented by UpEdgeMaintenanceTransitionConstraint.
    private MaintenanceTransitionConstraint createDefaultSpaceMaintenanceTransitionConstraint() {
        AnnotatedClusterState currentDefaultSpaceState = stateVersionTracker.getVersionedClusterStateBundle() .getDerivedBucketSpaceStates().getOrDefault(FixedBucketSpaces.defaultSpace(), AnnotatedClusterState.emptyState());
        return UpEdgeMaintenanceTransitionConstraint.forPreviouslyPublishedState(currentDefaultSpaceState.getClusterState());
    }

    /**
     * Move tasks that are dependent on the most recently generated state being published into
     * a completion queue with a dependency on the provided version argument. Once that version
     * has been ACKed by all distributors in the system, those tasks will be marked as completed.
     */
    private void scheduleVersionDependentTasksForFutureCompletion(int completeAtVersion) {
        final long maxDeadlineTimePointMs = timer.getCurrentTimeInMillis() + options.getMaxDeferredTaskVersionWaitTime().toMillis();
        for (RemoteClusterControllerTask task : tasksPendingStateRecompute) {
            context.log(logger, Level.INFO, task + " will be completed at version " + completeAtVersion);
            taskCompletionQueue.add(new VersionDependentTaskCompletion(completeAtVersion, task, maxDeadlineTimePointMs));
        }
        tasksPendingStateRecompute.clear();
    }

    // Generates the candidate annotated state from current options, time, cluster contents and
    // the lowest distribution bit count observed so far.
    private AnnotatedClusterState computeCurrentAnnotatedState() {
        ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
        params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
              .cluster(cluster)
              .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
        return ClusterStateGenerator.generatedStateFrom(params);
    }

    // Emits one event per semantic difference between the old and new state bundles, then the
    // summary "state applied" events.
    private void emitEventsForAlteredStateEdges(final ClusterStateBundle fromState, final ClusterStateBundle toState, final long timeNowMs) {
        final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff( EventDiffCalculator.params()
                .cluster(cluster)
                .fromState(fromState)
                .toState(toState)
                .currentTimeMs(timeNowMs)
                .maxMaintenanceGracePeriodTimeMs(options.storageNodeMaxTransitionTimeMs()));
        for (Event event : deltaEvents) {
            eventLog.add(event, isMaster);
        }
        emitStateAppliedEvents(timeNowMs, fromState.getBaselineClusterState(), toState.getBaselineClusterState());
    }

    private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
        eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "New cluster state version " + toClusterState.getVersion() + ". Change from last: " + fromClusterState.getTextualDifference(toClusterState), timeNowMs), isMaster);
        if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
            eventLog.add(new ClusterEvent( ClusterEvent.Type.SYSTEMSTATE, "Altering distribution bits in system from " + fromClusterState.getDistributionBitCount() + " to " + toClusterState.getDistributionBitCount(), timeNowMs), isMaster);
        }
    }

    // True exactly once: when this master becomes allowed to send its very first cluster state.
    private boolean atFirstClusterStateSendTimeEdge() {
        if (!isMaster || systemStateBroadcaster.hasBroadcastedClusterStateBundle()) {
            return false;
        }
        return hasPassedFirstStateBroadcastTimePoint(timer.getCurrentTimeInMillis());
    }

    private boolean mustRecomputeCandidateClusterState() {
        return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.bucketSpaceMergeCompletionStateHasChanged() || atFirstClusterStateSendTimeEdge();
    }

    /**
     * Handles becoming or ceasing to be the elected master. On the up-edge, reloads persisted
     * state from ZooKeeper, bumps the state version and enters the broadcast moratorium.
     * @return true if any state was changed or persisted.
     */
    private boolean handleLeadershipEdgeTransitions() {
        boolean didWork = false;
        if (masterElectionHandler.isMaster()) {
            if ( ! isMaster) {
                stateChangeHandler.setStateChangedFlag();
                systemStateBroadcaster.resetBroadcastedClusterStateBundle();
                stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
                ClusterStateBundle previousBundle = database.getLatestClusterStateBundle();
                database.loadStartTimestamps(cluster);
                database.loadWantedStates(databaseContext);
                context.log(logger, Level.INFO, () -> String.format("Loaded previous cluster state bundle from ZooKeeper: %s", previousBundle));
                stateVersionTracker.setClusterStateBundleRetrievedFromZooKeeper(previousBundle);
                eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to " + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
                long currentTime = timer.getCurrentTimeInMillis();
                firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
                isMaster = true;
                inMasterMoratorium = true;
                context.log(logger, Level.FINE, () -> "At time " + currentTime + " we set first system state broadcast time to be " + options.minTimeBeforeFirstSystemStateBroadcast + " ms after at time " + firstAllowedStateBroadcast + ".");
                didWork = true;
            }
            if (wantedStateChanged) {
                didWork |= database.saveWantedStates(databaseContext);
                wantedStateChanged = false;
            }
        } else {
            dropLeadershipState();
        }
        metricUpdater.updateMasterState(isMaster);
        return didWork;
    }

    // Down-edge of mastership: fail tasks waiting on version ACKs and reset master-only state.
    private void dropLeadershipState() {
        if (isMaster) {
            eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node is no longer fleetcontroller master.", timer.getCurrentTimeInMillis()));
            firstAllowedStateBroadcast = Long.MAX_VALUE;
            failAllVersionDependentTasks();
        }
        wantedStateChanged = false;
        isMaster = false;
        inMasterMoratorium = false;
    }

    /** Main controller loop: ticks until stopped; any fatal error exits the process. */
    @Override
    public void run() {
        controllerThreadId = Thread.currentThread().getId();
        context.log(logger, Level.INFO, "Starting tick loop");
        try {
            processingCycle = true;
            while (isRunning()) {
                tick();
            }
            context.log(logger, Level.INFO, "Tick loop stopped");
        } catch (InterruptedException e) {
            context.log(logger, Level.INFO, "Event thread stopped by interrupt exception: ", e);
        } catch (Throwable t) {
            t.printStackTrace();
            context.log(logger, Level.SEVERE, "Fatal error killed fleet controller", t);
            synchronized (monitor) { running.set(false); }
            System.exit(1);
        } finally {
            prepareShutdownEdge();
        }
    }

    // Marks the controller stopped, fails pending tasks and wakes anyone waiting on the monitor.
    private void prepareShutdownEdge() {
        running.set(false);
        failAllVersionDependentTasks();
        synchronized (monitor) { monitor.notifyAll(); }
    }

    // Adapter exposing this controller's cluster and listeners to the database layer.
    public DatabaseHandler.DatabaseContext databaseContext = new DatabaseHandler.DatabaseContext() {
        @Override
        public ContentCluster getCluster() { return
cluster; }
        @Override public FleetController getFleetController() { return FleetController.this; }
        @Override public SlobrokListener getNodeAddedOrRemovedListener() { return FleetController.this; }
        @Override public NodeListener getNodeStateUpdateListener() { return FleetController.this; }
    };

    /**
     * Blocks until at least one full tick cycle has completed after this call, or throws
     * IllegalStateException on timeout / controller shutdown. Used by tests.
     */
    public void waitForCompleteCycle(Duration timeout) {
        Instant endTime = Instant.now().plus(timeout);
        synchronized (monitor) {
            // If a cycle is in progress we must see it finish AND one more full cycle.
            long wantedCycle = cycleCount + (processingCycle ? 2 : 1);
            waitingForCycle = true;
            try{
                while (cycleCount < wantedCycle) {
                    if (Instant.now().isAfter(endTime))
                        throw new IllegalStateException("Timed out waiting for cycle to complete. Not completed after " + timeout);
                    if ( !isRunning() )
                        throw new IllegalStateException("Fleetcontroller not running. Will never complete cycles");
                    try{ monitor.wait(100); } catch (InterruptedException e) {}
                }
            } finally {
                waitingForCycle = false;
            }
        }
    }

    /**
     * This function might not be 100% threadsafe, as in theory cluster can be changing while accessed.
     * But it is only used in unit tests that should not trigger any thread issues. Don't want to add locks that reduce
     * live performance to remove a non-problem.
     */
    public void waitForNodesInSlobrok(int distNodeCount, int storNodeCount, Duration timeout) throws InterruptedException {
        Instant endTime = Instant.now().plus(timeout);
        synchronized (monitor) {
            while (true) {
                int distCount = 0, storCount = 0;
                for (NodeInfo info : cluster.getNodeInfos()) {
                    if (!info.isRpcAddressOutdated()) {
                        if (info.isDistributor()) ++distCount;
                        else ++storCount;
                    }
                }
                if (distCount == distNodeCount && storCount == storNodeCount) return;
                if (Instant.now().isAfter(endTime)) {
                    throw new IllegalStateException("Did not get all " + distNodeCount + " distributors and " + storNodeCount + " storage nodes registered in slobrok within timeout of " + timeout + ". (Got " + distCount + " distributors and " + storCount + " storage nodes)");
                }
                monitor.wait(10);
            }
        }
    }

    public boolean hasZookeeperConnection() { return !database.isClosed(); }

    // Test-support accessors below.
    public int getSlobrokMirrorUpdates() { return ((SlobrokClient)nodeLookup).getMirror().updates(); }

    public ContentCluster getCluster() { return cluster; }

    public List<NodeEvent> getNodeEvents(Node n) { return eventLog.getNodeEvents(n); }

    public EventLog getEventLog() { return eventLog; }

}
Should the truncating `timeout().getSeconds()` call be replaced by a `double timeoutInSeconds()` helper (preserving fractional seconds)?
void testGetSystemState() throws Exception {
    LogFormatter.initializeLogging();
    startingTest("RpcServerTest::testGetSystemState");
    FleetControllerOptions options = defaultOptions("mycluster");
    setUpFleetController(true, options);
    setUpVdsNodes(true, new DummyVdsNodeOptions());
    waitForStableSystem();
    assertTrue(nodes.get(0).isDistributor());
    log.log(Level.INFO, "Disconnecting distributor 0. Waiting for state to reflect change.");
    // Take down distributor 0 and storage node 9 (index 19 in the interleaved node list).
    nodes.get(0).disconnect();
    nodes.get(19).disconnect();
    fleetController.waitForNodesInSlobrok(9, 9, timeout());
    // Advance the fake clock past the request timeout + Slobrok grace period so the
    // controller actually marks the disconnected nodes down.
    timer.advanceTime(options.nodeStateRequestTimeoutMS + options.maxSlobrokDisconnectGracePeriod);
    wait(new WaitCondition.StateWait(fleetController, fleetController.getMonitor()) {
        @Override
        public String isConditionMet() {
            if (currentState == null) {
                return "No cluster state defined yet";
            }
            NodeState distState = currentState.getNodeState(new Node(NodeType.DISTRIBUTOR, 0));
            if (distState.getState() != State.DOWN) {
                return "Distributor not detected down yet: " + currentState.toString();
            }
            NodeState storState = currentState.getNodeState(new Node(NodeType.STORAGE, 9));
            // Storage node may be reported maintenance or down.
            if (!storState.getState().oneOf("md")) {
                return "Storage node not detected down yet: " + currentState.toString();
            }
            return null;
        }
    }, null, timeout());
    int rpcPort = fleetController.getRpcPort();
    supervisor = new Supervisor(new Transport());
    Target connection = supervisor.connect(new Spec("localhost", rpcPort));
    assertTrue(connection.isValid());
    Request req = new Request("getSystemState");
    // FIX: invokeSync takes the timeout in (fractional) seconds as a double.
    // Duration.getSeconds() truncates, so a sub-second timeout would become 0;
    // convert via milliseconds to preserve the fraction.
    connection.invokeSync(req, timeout().toMillis() / 1000.0);
    assertEquals(ErrorCode.NONE, req.errorCode(), req.toString());
    assertTrue(req.checkReturnTypes("ss"), req.toString());
    String systemState = req.returnValues().get(1).asString();
    ClusterState retrievedClusterState = new ClusterState(systemState);
    // The RPC-returned state must reflect both downed nodes.
    assertEquals(State.DOWN, retrievedClusterState.getNodeState(new Node(NodeType.DISTRIBUTOR, 0)).getState(), systemState);
    assertTrue(retrievedClusterState.getNodeState(new Node(NodeType.STORAGE, 9)).getState().oneOf("md"), systemState);
}
/* NOTE(review): Duration.getSeconds() returns whole seconds (truncating); a timeout below
   one second becomes 0 here — presumably unintended. invokeSync appears to take seconds
   (compare the variant passing timeoutInSeconds() elsewhere in this file) — TODO confirm. */
connection.invokeSync(req, timeout().getSeconds());
// Verifies the "getSystemState" RPC: takes down distributor 0 and storage node 9, waits for
// the published cluster state to reflect that, then checks the state string returned over RPC.
void testGetSystemState() throws Exception {
    LogFormatter.initializeLogging();
    startingTest("RpcServerTest::testGetSystemState");
    FleetControllerOptions options = defaultOptions("mycluster");
    setUpFleetController(true, options);
    setUpVdsNodes(true, new DummyVdsNodeOptions());
    waitForStableSystem();
    assertTrue(nodes.get(0).isDistributor());
    log.log(Level.INFO, "Disconnecting distributor 0. Waiting for state to reflect change.");
    // Index 19 is a storage node in the interleaved distributor/storage node list.
    nodes.get(0).disconnect();
    nodes.get(19).disconnect();
    fleetController.waitForNodesInSlobrok(9, 9, timeout());
    // Advance fake time past request timeout + Slobrok grace so nodes get marked down.
    timer.advanceTime(options.nodeStateRequestTimeoutMS + options.maxSlobrokDisconnectGracePeriod);
    wait(new WaitCondition.StateWait(fleetController, fleetController.getMonitor()) {
        @Override
        public String isConditionMet() {
            if (currentState == null) {
                return "No cluster state defined yet";
            }
            NodeState distState = currentState.getNodeState(new Node(NodeType.DISTRIBUTOR, 0));
            if (distState.getState() != State.DOWN) {
                return "Distributor not detected down yet: " + currentState.toString();
            }
            NodeState storState = currentState.getNodeState(new Node(NodeType.STORAGE, 9));
            // Maintenance or down both count as "detected down" for the storage node.
            if (!storState.getState().oneOf("md")) {
                return "Storage node not detected down yet: " + currentState.toString();
            }
            return null;
        }
    }, null, timeout());
    int rpcPort = fleetController.getRpcPort();
    supervisor = new Supervisor(new Transport());
    Target connection = supervisor.connect(new Spec("localhost", rpcPort));
    assertTrue(connection.isValid());
    Request req = new Request("getSystemState");
    connection.invokeSync(req, timeoutInSeconds());
    assertEquals(ErrorCode.NONE, req.errorCode(), req.toString());
    assertTrue(req.checkReturnTypes("ss"), req.toString());
    String systemState = req.returnValues().get(1).asString();
    ClusterState retrievedClusterState = new ClusterState(systemState);
    assertEquals(State.DOWN, retrievedClusterState.getNodeState(new Node(NodeType.DISTRIBUTOR, 0)).getState(), systemState);
    assertTrue(retrievedClusterState.getNodeState(new Node(NodeType.STORAGE, 9)).getState().oneOf("md"), systemState);
}
// Integration tests for the cluster controller's RPC server (class continues beyond this excerpt).
class RpcServerTest extends FleetControllerTest {

    public static Logger log = Logger.getLogger(RpcServerTest.class.getName());

    private Supervisor supervisor;

    public void tearDown() throws Exception {
        if (supervisor != null) {
            supervisor.transport().shutdown().join();
        }
        super.tearDown();
    }

    // Exercises repeated connect/disconnect and re-binding to the same port.
    @Test
    void testRebinding() throws Exception {
        startingTest("RpcServerTest::testRebinding");
        Slobrok slobrok = new Slobrok();
        String[] slobrokConnectionSpecs = getSlobrokConnectionSpecs(slobrok);
        RpcServer server = new RpcServer(timer, new Object(), "mycluster", 0, new BackOff());
        server.setSlobrokConnectionSpecs(slobrokConnectionSpecs, 18347);
        int portUsed = server.getPort();
        server.setSlobrokConnectionSpecs(slobrokConnectionSpecs, portUsed);
        server.disconnect();
        server.disconnect();
        server.connect();
        server.connect();
        server.disconnect();
        server.connect();
        server.shutdown();
        slobrok.stop();
    }

    /**
     * For some reason, the first test trying to set up a stable system here occasionally times out.
     * The theory is that some test run before it does something that is not cleaned up in time.
     * Trying to add a test that should provoke the failure, but not fail due to it to see if we can verify that
     * assumption.
     *
     * (testRebinding() does not seem to be that test. Tests in StateChangeTest that runs before this test tests very
     * similar things, so strange if it should be from them too though. Maybe last test there.
     */
    @Test
    void testFailOccasionallyAndIgnoreToSeeIfOtherTestsThenWork() {
        try {
            startingTest("RpcServerTest::testFailOccasionallyAndIgnoreToSeeIfOtherTestsThenWork");
            setUpFleetController(true, defaultOptions("mycluster"));
            setUpVdsNodes(true, new DummyVdsNodeOptions());
            waitForStableSystem();
        // Deliberately best-effort: failures here are swallowed (see javadoc above).
        } catch (Throwable t) { }
    }

    // NOTE(review): @Test on a private, parameterized helper looks wrong — JUnit 5 would not run
    // (and jupiter would flag) a private test method with arguments. Presumably this annotation is
    // a leftover from a removed test; verify against history before removing.
    @Test
    private void setWantedNodeState(State newState, NodeType nodeType, int nodeIndex) {
        int rpcPort = fleetController.getRpcPort();
        if (supervisor == null) {
            supervisor = new Supervisor(new Transport());
        }
        Target connection = supervisor.connect(new Spec("localhost", rpcPort));
        assertTrue(connection.isValid());
        Node node = new Node(nodeType, nodeIndex);
        NodeState newNodeState = new NodeState(nodeType, newState);
        Request req = setNodeState("storage/cluster.mycluster/" + node.getType().toString() + "/" + node.getIndex(), newNodeState, connection);
        assertEquals(ErrorCode.NONE, req.errorCode(), req.toString());
        assertTrue(req.checkReturnTypes("s"), req.toString());
    }

    // Verifies the getNodeState RPC (wanted/reported/published triplets) across wanted-state
    // changes, node disconnects and re-initialization.
    @Test
    void testGetNodeState() throws Exception {
        startingTest("RpcServerTest::testGetNodeState");
        Set<ConfiguredNode> configuredNodes = new TreeSet<>();
        for (int i = 0; i < 10; i++) configuredNodes.add(new ConfiguredNode(i, false));
        FleetControllerOptions options = defaultOptions("mycluster", configuredNodes);
        // Allow the cluster to stay up with all storage nodes down, and use long init/stable windows.
        options.minRatioOfStorageNodesUp = 0;
        options.maxInitProgressTime = 30000;
        options.stableStateTimePeriod = 60000;
        setUpFleetController(true, options);
        setUpVdsNodes(true, new DummyVdsNodeOptions());
        waitForStableSystem();
        setWantedNodeState(State.DOWN, NodeType.DISTRIBUTOR, 2);
        setWantedNodeState(State.RETIRED, NodeType.STORAGE, 2);
        setWantedNodeState(State.MAINTENANCE, NodeType.STORAGE, 7);
        waitForCompleteCycle();
        timer.advanceTime(1000000);
        waitForCompleteCycle();
        nodes.get(0).disconnect();
        nodes.get(3).disconnect();
        nodes.get(5).disconnect();
        waitForState("version:\\d+ distributor:10 .0.s:d .2.s:d storage:10 .1.s:m .2.s:m .7.s:m");
        timer.advanceTime(1000000);
        waitForState("version:\\d+ distributor:10 .0.s:d .2.s:d storage:10 .1.s:d .2.s:d .7.s:m");
        timer.advanceTime(1000000);
        waitForCompleteCycle();
        nodes.get(3).setNodeState(new NodeState(nodes.get(3).getType(), State.INITIALIZING).setInitProgress(0.2f));
        nodes.get(3).connect();
        waitForState("version:\\d+ distributor:10 .0.s:d .2.s:d storage:10 .1.s:i .1.i:0.2 .2.s:d .7.s:m");
        int rpcPort = fleetController.getRpcPort();
        supervisor = new Supervisor(new Transport());
        Target connection = supervisor.connect(new Spec("localhost", rpcPort));
        assertTrue(connection.isValid());
        // Return values: (0) wanted state, (1) reported state, (2) published/system state.
        Request req = getNodeState("distributor", 0, connection);
        assertEquals(ErrorCode.NONE, req.errorCode(), req.toString());
        assertTrue(req.checkReturnTypes("ssss"), req.toString());
        assertEquals(State.DOWN, NodeState.deserialize(NodeType.DISTRIBUTOR, req.returnValues().get(0).asString()).getState());
        NodeState reported = NodeState.deserialize(NodeType.DISTRIBUTOR, req.returnValues().get(1).asString());
        assertTrue(reported.getState().oneOf("d-"), req.returnValues().get(1).asString());
        assertEquals("", req.returnValues().get(2).asString());
        req = getNodeState("distributor",2, connection);
        assertEquals(ErrorCode.NONE, req.errorCode(), req.toString());
        assertTrue(req.checkReturnTypes("ssss"), req.toString());
        assertEquals(State.DOWN, NodeState.deserialize(NodeType.DISTRIBUTOR, req.returnValues().get(0).asString()).getState());
        assertEquals("t:946080000", req.returnValues().get(1).asString());
        assertEquals(State.DOWN, NodeState.deserialize(NodeType.DISTRIBUTOR, req.returnValues().get(2).asString()).getState());
        req = getNodeState("distributor", 4, connection);
        assertEquals(ErrorCode.NONE, req.errorCode(), req.toString());
        assertTrue(req.checkReturnTypes("ssss"), req.toString());
        assertEquals("", req.returnValues().get(0).asString());
        assertEquals("t:946080000", req.returnValues().get(1).asString());
        assertEquals("", req.returnValues().get(2).asString());
        // Out-of-range node index must fail with METHOD_FAILED.
        req = getNodeState("distributor", 15, connection);
        assertEquals(ErrorCode.METHOD_FAILED, req.errorCode(), req.toString());
        assertEquals("No node distributor.15 exists in cluster mycluster", req.errorMessage());
        assertFalse(req.checkReturnTypes("ssss"), req.toString());
        req = getNodeState("storage", 1, connection);
        assertEquals(ErrorCode.NONE, req.errorCode(), req.toString());
        assertTrue(req.checkReturnTypes("ssss"), req.toString());
        assertEquals("s:i i:0.2", req.returnValues().get(0).asString());
        assertEquals("s:i i:0.2", req.returnValues().get(1).asString());
        assertEquals("", req.returnValues().get(2).asString());
        req = getNodeState("storage", 2, connection);
        assertEquals(ErrorCode.NONE, req.errorCode(), req.toString());
        assertTrue(req.checkReturnTypes("ssss"), req.toString());
        assertEquals(State.DOWN, NodeState.deserialize(NodeType.STORAGE, req.returnValues().get(0).asString()).getState());
        reported = NodeState.deserialize(NodeType.STORAGE, req.returnValues().get(1).asString());
        assertTrue(reported.getState().oneOf("d-"), req.returnValues().get(1).asString());
        assertEquals(State.RETIRED, NodeState.deserialize(NodeType.STORAGE, req.returnValues().get(2).asString()).getState());
        req = getNodeState("storage", 5, connection);
        assertEquals(ErrorCode.NONE, req.errorCode(), req.toString());
        assertTrue(req.checkReturnTypes("ssss"), req.toString());
        assertEquals("", req.returnValues().get(0).asString());
        assertEquals("t:946080000", req.returnValues().get(1).asString());
        assertEquals("", req.returnValues().get(2).asString());
        req = getNodeState("storage", 7, connection);
        assertEquals(ErrorCode.NONE, req.errorCode(), req.toString());
        assertTrue(req.checkReturnTypes("ssss"), req.toString());
        assertEquals(State.MAINTENANCE, NodeState.deserialize(NodeType.STORAGE, req.returnValues().get(0).asString()).getState());
        assertEquals("t:946080000", req.returnValues().get(1).asString());
        assertEquals(State.MAINTENANCE, NodeState.deserialize(NodeType.STORAGE, req.returnValues().get(2).asString()).getState());
    }

    // Same flow as testGetNodeState, but with node 4 configured retired from the start.
    @Test
    void testGetNodeStateWithConfiguredRetired() throws Exception {
        startingTest("RpcServerTest::testGetNodeStateWithConfiguredRetired");
        List<ConfiguredNode> configuredNodes = new ArrayList<>();
        for (int i = 0; i < 4; i++) configuredNodes.add(new ConfiguredNode(i, false));
        configuredNodes.add(new ConfiguredNode(4, true));
        FleetControllerOptions options = defaultOptions("mycluster", configuredNodes);
        options.minRatioOfStorageNodesUp = 0;
        options.maxInitProgressTime = 30000;
        options.stableStateTimePeriod = 60000;
        setUpFleetController(true, options);
        setUpVdsNodes(true, new DummyVdsNodeOptions(), false, configuredNodes);
        waitForState("version:\\d+ distributor:5 storage:5 .4.s:r");
        setWantedNodeState(State.DOWN, NodeType.DISTRIBUTOR, 2);
        setWantedNodeState(State.RETIRED, NodeType.STORAGE, 2);
        setWantedNodeState(State.MAINTENANCE, NodeType.STORAGE, 3);
        waitForCompleteCycle();
        timer.advanceTime(1000000);
        waitForCompleteCycle();
        nodes.get(0).disconnect();
        nodes.get(3).disconnect();
        nodes.get(5).disconnect();
        waitForState("version:\\d+ distributor:5 .0.s:d .2.s:d storage:5 .1.s:m .2.s:m .3.s:m .4.s:r");
        timer.advanceTime(1000000);
        waitForState("version:\\d+ distributor:5 .0.s:d .2.s:d storage:5 .1.s:d .2.s:d .3.s:m .4.s:r");
        timer.advanceTime(1000000);
        waitForCompleteCycle();
        nodes.get(3).setNodeState(new NodeState(nodes.get(3).getType(), State.INITIALIZING).setInitProgress(0.2f));
        nodes.get(3).connect();
        waitForState("version:\\d+ distributor:5 .0.s:d .2.s:d storage:5 .1.s:i .1.i:0.2 .2.s:d .3.s:m .4.s:r");
    }

    // Reconfigures nodes to retired while they are down, grows the cluster, and verifies each
    // intermediate published state. (Method continues beyond this excerpt.)
    @Test
    void testGetNodeStateWithConfigurationChangeToRetiredWhileNodeDown() throws Exception {
        startingTest("RpcServerTest::testGetNodeStateWithConfigurationChangeToRetiredWhileNodeDown");
        {
            List<ConfiguredNode> configuredNodes = new ArrayList<>();
            for (int i = 0; i < 5; i++) configuredNodes.add(new ConfiguredNode(i, false));
            FleetControllerOptions options = defaultOptions("mycluster", configuredNodes);
            options.maxInitProgressTime = 30000;
            options.stableStateTimePeriod = 60000;
            setUpFleetController(true, options);
            setUpVdsNodes(true, new DummyVdsNodeOptions(), false, configuredNodes);
            waitForState("version:\\d+ distributor:5 storage:5");
        }
        {
            waitForCompleteCycle();
            timer.advanceTime(1000000);
            waitForCompleteCycle();
            nodes.get(1).disconnectImmediately();
            nodes.get(3).disconnectImmediately();
            waitForState("version:\\d+ distributor:5 storage:5 .0.s:m .1.s:m");
        }
        {
            setUpVdsNodes(true, new DummyVdsNodeOptions(), false, 2);
            Set<ConfiguredNode> configuredNodes = new TreeSet<>();
            for (int i = 0; i < 5; i++) configuredNodes.add(new ConfiguredNode(i, true));
            configuredNodes.add(new ConfiguredNode(5, false));
            configuredNodes.add(new ConfiguredNode(6, false));
            FleetControllerOptions options = defaultOptions("mycluster", configuredNodes);
            options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs;
            this.options.maxInitProgressTime = 30000;
            this.options.stableStateTimePeriod = 60000;
            fleetController.updateOptions(options);
            waitForState("version:\\d+ distributor:7 storage:7 .0.s:m .1.s:m .2.s:r .3.s:r .4.s:r");
        }
        {
            waitForCompleteCycle();
            timer.advanceTime(1000000);
            waitForCompleteCycle();
            nodes.get(1).connect();
            nodes.get(3).connect();
            waitForState("version:\\d+ distributor:7 storage:7 .0.s:r .1.s:r .2.s:r .3.s:r .4.s:r");
        }
        {
            waitForCompleteCycle();
            timer.advanceTime(1000000);
            waitForCompleteCycle();
            nodes.get(1).disconnectImmediately();
            nodes.get(3).disconnectImmediately();
            waitForState("version:\\d+ distributor:7 storage:7 .0.s:m .1.s:m .2.s:r .3.s:r .4.s:r");
        }
        {
            Set<ConfiguredNode> configuredNodes = new TreeSet<>();
            for (int i = 0; i < 7; i++) configuredNodes.add(new ConfiguredNode(i, false));
            FleetControllerOptions options = defaultOptions("mycluster", configuredNodes);
            options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs;
            this.options.maxInitProgressTime = 30000;
            this.options.stableStateTimePeriod = 60000;
            fleetController.updateOptions(options);
            waitForState("version:\\d+ distributor:7 storage:7 .0.s:m .1.s:m");
        }
{ waitForCompleteCycle(); timer.advanceTime(1000000); waitForCompleteCycle(); nodes.get(1).connect(); nodes.get(3).connect(); waitForState("version:\\d+ distributor:7 storage:7"); } } @Test void testGetNodeStateWithConfigurationChangeToRetired() throws Exception { startingTest("RpcServerTest::testGetNodeStateWithConfigurationChangeToRetired"); { List<ConfiguredNode> configuredNodes = new ArrayList<>(); for (int i = 0; i < 5; i++) configuredNodes.add(new ConfiguredNode(i, false)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.maxInitProgressTime = 30000; options.stableStateTimePeriod = 60000; setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions(), false, configuredNodes); waitForState("version:\\d+ distributor:5 storage:5"); } { Set<ConfiguredNode> configuredNodes = new TreeSet<>(); for (int i = 0; i < 5; i++) configuredNodes.add(new ConfiguredNode(i, false)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs; this.options.maxInitProgressTime = 30000; this.options.stableStateTimePeriod = 60000; fleetController.updateOptions(options); waitForState("version:\\d+ distributor:5 storage:5"); } { setUpVdsNodes(true, new DummyVdsNodeOptions(), false, 2); Set<ConfiguredNode> configuredNodes = new TreeSet<>(); for (int i = 0; i < 5; i++) configuredNodes.add(new ConfiguredNode(i, true)); configuredNodes.add(new ConfiguredNode(5, false)); configuredNodes.add(new ConfiguredNode(6, false)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs; this.options.maxInitProgressTime = 30000; this.options.stableStateTimePeriod = 60000; fleetController.updateOptions(options); waitForState("version:\\d+ distributor:7 storage:7 .0.s:r .1.s:r .2.s:r .3.s:r .4.s:r"); } { Set<ConfiguredNode> configuredNodes = new TreeSet<>(); for (int 
i = 0; i < 5; i++) configuredNodes.add(new ConfiguredNode(i, true)); configuredNodes.add(new ConfiguredNode(5, false)); configuredNodes.add(new ConfiguredNode(6, false)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs; this.options.maxInitProgressTime = 30000; this.options.stableStateTimePeriod = 60000; fleetController.updateOptions(options); waitForState("version:\\d+ distributor:7 storage:7 .0.s:r .1.s:r .2.s:r .3.s:r .4.s:r"); } { /* TODO: Verify current result: version:23 distributor:7 .0.s:d .1.s:d .2.s:d .3.s:d .4.s:d storage:7 .0.s:m .1.s:m .2.s:m .3.s:m .4.s:m TODO: Make this work without stopping/disconnecting (see StateChangeHandler.setNodes Set<ConfiguredNode> configuredNodes = new TreeSet<>(); configuredNodes.add(new ConfiguredNode(5, false)); configuredNodes.add(new ConfiguredNode(6, false)); FleetControllerOptions options = new FleetControllerOptions("mycluster", configuredNodes); options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs; this.options.maxInitProgressTimeMs = 30000; this.options.stableStateTimePeriod = 60000; fleetController.updateOptions(options, 0); for (int i = 0; i < 5*2; i++) { nodes.get(i).disconnectSlobrok(); nodes.get(i).disconnect(); } waitForState("version:\\d+ distributor:7 storage:7 .0.s:d .1.s:d .2.s:d .3.s:d .4.s:d"); */ } } @Test void testSetNodeState() throws Exception { startingTest("RpcServerTest::testSetNodeState"); Set<Integer> nodeIndexes = new TreeSet<>(List.of(4, 6, 9, 10, 14, 16, 21, 22, 23, 25)); Set<ConfiguredNode> configuredNodes = nodeIndexes.stream().map(i -> new ConfiguredNode(i, false)).collect(Collectors.toSet()); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions(), false, nodeIndexes); waitForState("version:\\d+ distributor:26 .0.s:d .1.s:d .2.s:d .3.s:d .5.s:d .7.s:d .8.s:d .11.s:d 
.12.s:d .13.s:d .15.s:d .17.s:d .18.s:d .19.s:d .20.s:d .24.s:d storage:26 .0.s:d .1.s:d .2.s:d .3.s:d .5.s:d .7.s:d .8.s:d .11.s:d .12.s:d .13.s:d .15.s:d .17.s:d .18.s:d .19.s:d .20.s:d .24.s:d"); int rpcPort = fleetController.getRpcPort(); supervisor = new Supervisor(new Transport()); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); Request req = setNodeState("storage/cluster.mycluster/storage/14", "s:r", connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("s"), req.toString()); waitForState("version:\\d+ distributor:26 .* storage:26 .* .14.s:r .*"); req = setNodeState("storage/cluster.mycluster/storage/16", "s:m", connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("s"), req.toString()); waitForState("version:\\d+ distributor:26 .* storage:26 .* .14.s:r.* .16.s:m .*"); nodes.get(5 * 2 + 1).disconnect(); waitForCompleteCycle(); timer.advanceTime(100000000); waitForCompleteCycle(); assertEquals(State.MAINTENANCE, fleetController.getSystemState().getNodeState(new Node(NodeType.STORAGE, 16)).getState()); nodes.get(4 * 2 + 1).disconnect(); waitForState("version:\\d+ distributor:26 .* storage:26 .* .14.s:m.* .16.s:m .*"); nodes.get(4 * 2 + 1).connect(); timer.advanceTime(100000000); waitForState("version:\\d+ distributor:26 .* storage:26 .* .14.s:r.* .16.s:m .*"); } @Test void testSetNodeStateOutOfRange() throws Exception { startingTest("RpcServerTest::testSetNodeStateOutOfRange"); FleetControllerOptions options = defaultOptions("mycluster"); options.setStorageDistribution(new Distribution(Distribution.getDefaultDistributionConfig(2, 10))); setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions()); waitForStableSystem(); int rpcPort = fleetController.getRpcPort(); supervisor = new Supervisor(new Transport()); Target connection = supervisor.connect(new Spec("localhost", 
rpcPort)); assertTrue(connection.isValid()); Request req = setNodeState("storage/cluster.mycluster/storage/10", "s:m", connection); assertEquals(ErrorCode.METHOD_FAILED, req.errorCode(), req.toString()); assertEquals("Cannot set wanted state of node storage.10. Index does not correspond to a configured node.", req.errorMessage(), req.toString()); req = setNodeState("storage/cluster.mycluster/distributor/10", "s:m", connection); assertEquals(ErrorCode.METHOD_FAILED, req.errorCode(), req.toString()); assertEquals("Cannot set wanted state of node distributor.10. Index does not correspond to a configured node.", req.errorMessage(), req.toString()); req = setNodeState("storage/cluster.mycluster/storage/9", "s:m", connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); waitForState("version:\\d+ distributor:10 storage:10 .9.s:m"); } @Test void testGetMaster() throws Exception { startingTest("RpcServerTest::testGetMaster"); FleetControllerOptions options = defaultOptions("mycluster"); options.setStorageDistribution(new Distribution(Distribution.getDefaultDistributionConfig(2, 10))); setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions()); waitForStableSystem(); int rpcPort = fleetController.getRpcPort(); supervisor = new Supervisor(new Transport()); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); Request req = new Request("getMaster"); connection.invokeSync(req, timeoutInSeconds()); assertEquals(0, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("All 1 nodes agree that 0 is current master.", req.returnValues().get(1).asString(), req.toString()); } @Test void testGetNodeList() throws Exception { startingTest("RpcServerTest::testGetNodeList"); setUpFleetController(true, defaultOptions("mycluster", 5)); final int nodeCount = 5; setUpVdsNodes(true, new DummyVdsNodeOptions(), false, nodeCount); waitForStableSystem(); 
assertTrue(nodes.get(0).isDistributor()); nodes.get(0).disconnect(); waitForState("version:\\d+ distributor:5 .0.s:d storage:5"); int rpcPort = fleetController.getRpcPort(); supervisor = new Supervisor(new Transport()); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); Request req = new Request("getNodeList"); connection.invokeSync(req, timeoutInSeconds()); assertEquals(ErrorCode.NONE, req.errorCode(), req.errorMessage()); assertTrue(req.checkReturnTypes("SS"), req.toString()); String[] slobrok = req.returnValues().get(0).asStringArray().clone(); String[] rpc = req.returnValues().get(1).asStringArray().clone(); assertEquals(2 * nodeCount, slobrok.length); assertEquals(2 * nodeCount, rpc.length); for (int i = 0; i < 2 * nodeCount; ++i) { if (slobrok[i].equals("storage/cluster.mycluster/distributor/0")) { if (i < nodeCount && !"".equals(rpc[i])) { continue; } assertEquals("", rpc[i], slobrok[i]); continue; } assertNotEquals("", rpc[i]); Request req2 = new Request("getnodestate2"); req2.parameters().add(new StringValue("unknown")); Target connection2 = supervisor.connect(new Spec(rpc[i])); connection2.invokeSync(req2, timeoutInSeconds()); assertEquals(ErrorCode.NONE, req.errorCode(), req2.toString()); } } private Request setNodeState(String node, NodeState newNodeState, Target connection) { return setNodeState(node, newNodeState.serialize(true), connection); } private Request setNodeState(String node, String newNodeState, Target connection) { Request req = new Request("setNodeState"); req.parameters().add(new StringValue(node)); req.parameters().add(new StringValue(newNodeState)); connection.invokeSync(req, timeoutInSeconds()); return req; } private Request getNodeState(String nodeType, int nodeIndex, Target connection) { Request req = new Request("getNodeState"); req.parameters().add(new StringValue(nodeType)); req.parameters().add(new Int32Value(nodeIndex)); connection.invokeSync(req, timeoutInSeconds()); 
return req; } }
class RpcServerTest extends FleetControllerTest { public static Logger log = Logger.getLogger(RpcServerTest.class.getName()); private Supervisor supervisor; public void tearDown() throws Exception { if (supervisor != null) { supervisor.transport().shutdown().join(); } super.tearDown(); } @Test void testRebinding() throws Exception { startingTest("RpcServerTest::testRebinding"); Slobrok slobrok = new Slobrok(); String[] slobrokConnectionSpecs = getSlobrokConnectionSpecs(slobrok); RpcServer server = new RpcServer(timer, new Object(), "mycluster", 0, new BackOff()); server.setSlobrokConnectionSpecs(slobrokConnectionSpecs, 18347); int portUsed = server.getPort(); server.setSlobrokConnectionSpecs(slobrokConnectionSpecs, portUsed); server.disconnect(); server.disconnect(); server.connect(); server.connect(); server.disconnect(); server.connect(); server.shutdown(); slobrok.stop(); } /** * For some reason, the first test trying to set up a stable system here occasionally times out. * The theory is that some test run before it does something that is not cleaned up in time. * Trying to add a test that should provoke the failure, but not fail due to it to see if we can verify that * assumption. * * (testRebinding() does not seem to be that test. Tests in StateChangeTest that runs before this test tests very * similar things, so strange if it should be from them too though. Maybe last test there. 
*/ @Test void testFailOccasionallyAndIgnoreToSeeIfOtherTestsThenWork() { try { startingTest("RpcServerTest::testFailOccasionallyAndIgnoreToSeeIfOtherTestsThenWork"); setUpFleetController(true, defaultOptions("mycluster")); setUpVdsNodes(true, new DummyVdsNodeOptions()); waitForStableSystem(); } catch (Throwable t) { } } @Test private void setWantedNodeState(State newState, NodeType nodeType, int nodeIndex) { int rpcPort = fleetController.getRpcPort(); if (supervisor == null) { supervisor = new Supervisor(new Transport()); } Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); Node node = new Node(nodeType, nodeIndex); NodeState newNodeState = new NodeState(nodeType, newState); Request req = setNodeState("storage/cluster.mycluster/" + node.getType().toString() + "/" + node.getIndex(), newNodeState, connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("s"), req.toString()); } @Test void testGetNodeState() throws Exception { startingTest("RpcServerTest::testGetNodeState"); Set<ConfiguredNode> configuredNodes = new TreeSet<>(); for (int i = 0; i < 10; i++) configuredNodes.add(new ConfiguredNode(i, false)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.minRatioOfStorageNodesUp = 0; options.maxInitProgressTime = 30000; options.stableStateTimePeriod = 60000; setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions()); waitForStableSystem(); setWantedNodeState(State.DOWN, NodeType.DISTRIBUTOR, 2); setWantedNodeState(State.RETIRED, NodeType.STORAGE, 2); setWantedNodeState(State.MAINTENANCE, NodeType.STORAGE, 7); waitForCompleteCycle(); timer.advanceTime(1000000); waitForCompleteCycle(); nodes.get(0).disconnect(); nodes.get(3).disconnect(); nodes.get(5).disconnect(); waitForState("version:\\d+ distributor:10 .0.s:d .2.s:d storage:10 .1.s:m .2.s:m .7.s:m"); timer.advanceTime(1000000); 
waitForState("version:\\d+ distributor:10 .0.s:d .2.s:d storage:10 .1.s:d .2.s:d .7.s:m"); timer.advanceTime(1000000); waitForCompleteCycle(); nodes.get(3).setNodeState(new NodeState(nodes.get(3).getType(), State.INITIALIZING).setInitProgress(0.2f)); nodes.get(3).connect(); waitForState("version:\\d+ distributor:10 .0.s:d .2.s:d storage:10 .1.s:i .1.i:0.2 .2.s:d .7.s:m"); int rpcPort = fleetController.getRpcPort(); supervisor = new Supervisor(new Transport()); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); Request req = getNodeState("distributor", 0, connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("ssss"), req.toString()); assertEquals(State.DOWN, NodeState.deserialize(NodeType.DISTRIBUTOR, req.returnValues().get(0).asString()).getState()); NodeState reported = NodeState.deserialize(NodeType.DISTRIBUTOR, req.returnValues().get(1).asString()); assertTrue(reported.getState().oneOf("d-"), req.returnValues().get(1).asString()); assertEquals("", req.returnValues().get(2).asString()); req = getNodeState("distributor",2, connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("ssss"), req.toString()); assertEquals(State.DOWN, NodeState.deserialize(NodeType.DISTRIBUTOR, req.returnValues().get(0).asString()).getState()); assertEquals("t:946080000", req.returnValues().get(1).asString()); assertEquals(State.DOWN, NodeState.deserialize(NodeType.DISTRIBUTOR, req.returnValues().get(2).asString()).getState()); req = getNodeState("distributor", 4, connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("ssss"), req.toString()); assertEquals("", req.returnValues().get(0).asString()); assertEquals("t:946080000", req.returnValues().get(1).asString()); assertEquals("", req.returnValues().get(2).asString()); req = getNodeState("distributor", 15, connection); 
assertEquals(ErrorCode.METHOD_FAILED, req.errorCode(), req.toString()); assertEquals("No node distributor.15 exists in cluster mycluster", req.errorMessage()); assertFalse(req.checkReturnTypes("ssss"), req.toString()); req = getNodeState("storage", 1, connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("ssss"), req.toString()); assertEquals("s:i i:0.2", req.returnValues().get(0).asString()); assertEquals("s:i i:0.2", req.returnValues().get(1).asString()); assertEquals("", req.returnValues().get(2).asString()); req = getNodeState("storage", 2, connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("ssss"), req.toString()); assertEquals(State.DOWN, NodeState.deserialize(NodeType.STORAGE, req.returnValues().get(0).asString()).getState()); reported = NodeState.deserialize(NodeType.STORAGE, req.returnValues().get(1).asString()); assertTrue(reported.getState().oneOf("d-"), req.returnValues().get(1).asString()); assertEquals(State.RETIRED, NodeState.deserialize(NodeType.STORAGE, req.returnValues().get(2).asString()).getState()); req = getNodeState("storage", 5, connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("ssss"), req.toString()); assertEquals("", req.returnValues().get(0).asString()); assertEquals("t:946080000", req.returnValues().get(1).asString()); assertEquals("", req.returnValues().get(2).asString()); req = getNodeState("storage", 7, connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("ssss"), req.toString()); assertEquals(State.MAINTENANCE, NodeState.deserialize(NodeType.STORAGE, req.returnValues().get(0).asString()).getState()); assertEquals("t:946080000", req.returnValues().get(1).asString()); assertEquals(State.MAINTENANCE, NodeState.deserialize(NodeType.STORAGE, req.returnValues().get(2).asString()).getState()); } @Test void 
testGetNodeStateWithConfiguredRetired() throws Exception { startingTest("RpcServerTest::testGetNodeStateWithConfiguredRetired"); List<ConfiguredNode> configuredNodes = new ArrayList<>(); for (int i = 0; i < 4; i++) configuredNodes.add(new ConfiguredNode(i, false)); configuredNodes.add(new ConfiguredNode(4, true)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.minRatioOfStorageNodesUp = 0; options.maxInitProgressTime = 30000; options.stableStateTimePeriod = 60000; setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions(), false, configuredNodes); waitForState("version:\\d+ distributor:5 storage:5 .4.s:r"); setWantedNodeState(State.DOWN, NodeType.DISTRIBUTOR, 2); setWantedNodeState(State.RETIRED, NodeType.STORAGE, 2); setWantedNodeState(State.MAINTENANCE, NodeType.STORAGE, 3); waitForCompleteCycle(); timer.advanceTime(1000000); waitForCompleteCycle(); nodes.get(0).disconnect(); nodes.get(3).disconnect(); nodes.get(5).disconnect(); waitForState("version:\\d+ distributor:5 .0.s:d .2.s:d storage:5 .1.s:m .2.s:m .3.s:m .4.s:r"); timer.advanceTime(1000000); waitForState("version:\\d+ distributor:5 .0.s:d .2.s:d storage:5 .1.s:d .2.s:d .3.s:m .4.s:r"); timer.advanceTime(1000000); waitForCompleteCycle(); nodes.get(3).setNodeState(new NodeState(nodes.get(3).getType(), State.INITIALIZING).setInitProgress(0.2f)); nodes.get(3).connect(); waitForState("version:\\d+ distributor:5 .0.s:d .2.s:d storage:5 .1.s:i .1.i:0.2 .2.s:d .3.s:m .4.s:r"); } @Test void testGetNodeStateWithConfigurationChangeToRetiredWhileNodeDown() throws Exception { startingTest("RpcServerTest::testGetNodeStateWithConfigurationChangeToRetiredWhileNodeDown"); { List<ConfiguredNode> configuredNodes = new ArrayList<>(); for (int i = 0; i < 5; i++) configuredNodes.add(new ConfiguredNode(i, false)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.maxInitProgressTime = 30000; options.stableStateTimePeriod = 
60000; setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions(), false, configuredNodes); waitForState("version:\\d+ distributor:5 storage:5"); } { waitForCompleteCycle(); timer.advanceTime(1000000); waitForCompleteCycle(); nodes.get(1).disconnectImmediately(); nodes.get(3).disconnectImmediately(); waitForState("version:\\d+ distributor:5 storage:5 .0.s:m .1.s:m"); } { setUpVdsNodes(true, new DummyVdsNodeOptions(), false, 2); Set<ConfiguredNode> configuredNodes = new TreeSet<>(); for (int i = 0; i < 5; i++) configuredNodes.add(new ConfiguredNode(i, true)); configuredNodes.add(new ConfiguredNode(5, false)); configuredNodes.add(new ConfiguredNode(6, false)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs; this.options.maxInitProgressTime = 30000; this.options.stableStateTimePeriod = 60000; fleetController.updateOptions(options); waitForState("version:\\d+ distributor:7 storage:7 .0.s:m .1.s:m .2.s:r .3.s:r .4.s:r"); } { waitForCompleteCycle(); timer.advanceTime(1000000); waitForCompleteCycle(); nodes.get(1).connect(); nodes.get(3).connect(); waitForState("version:\\d+ distributor:7 storage:7 .0.s:r .1.s:r .2.s:r .3.s:r .4.s:r"); } { waitForCompleteCycle(); timer.advanceTime(1000000); waitForCompleteCycle(); nodes.get(1).disconnectImmediately(); nodes.get(3).disconnectImmediately(); waitForState("version:\\d+ distributor:7 storage:7 .0.s:m .1.s:m .2.s:r .3.s:r .4.s:r"); } { Set<ConfiguredNode> configuredNodes = new TreeSet<>(); for (int i = 0; i < 7; i++) configuredNodes.add(new ConfiguredNode(i, false)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs; this.options.maxInitProgressTime = 30000; this.options.stableStateTimePeriod = 60000; fleetController.updateOptions(options); waitForState("version:\\d+ distributor:7 storage:7 .0.s:m .1.s:m"); } 
{ waitForCompleteCycle(); timer.advanceTime(1000000); waitForCompleteCycle(); nodes.get(1).connect(); nodes.get(3).connect(); waitForState("version:\\d+ distributor:7 storage:7"); } } @Test void testGetNodeStateWithConfigurationChangeToRetired() throws Exception { startingTest("RpcServerTest::testGetNodeStateWithConfigurationChangeToRetired"); { List<ConfiguredNode> configuredNodes = new ArrayList<>(); for (int i = 0; i < 5; i++) configuredNodes.add(new ConfiguredNode(i, false)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.maxInitProgressTime = 30000; options.stableStateTimePeriod = 60000; setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions(), false, configuredNodes); waitForState("version:\\d+ distributor:5 storage:5"); } { Set<ConfiguredNode> configuredNodes = new TreeSet<>(); for (int i = 0; i < 5; i++) configuredNodes.add(new ConfiguredNode(i, false)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs; this.options.maxInitProgressTime = 30000; this.options.stableStateTimePeriod = 60000; fleetController.updateOptions(options); waitForState("version:\\d+ distributor:5 storage:5"); } { setUpVdsNodes(true, new DummyVdsNodeOptions(), false, 2); Set<ConfiguredNode> configuredNodes = new TreeSet<>(); for (int i = 0; i < 5; i++) configuredNodes.add(new ConfiguredNode(i, true)); configuredNodes.add(new ConfiguredNode(5, false)); configuredNodes.add(new ConfiguredNode(6, false)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs; this.options.maxInitProgressTime = 30000; this.options.stableStateTimePeriod = 60000; fleetController.updateOptions(options); waitForState("version:\\d+ distributor:7 storage:7 .0.s:r .1.s:r .2.s:r .3.s:r .4.s:r"); } { Set<ConfiguredNode> configuredNodes = new TreeSet<>(); for (int 
i = 0; i < 5; i++) configuredNodes.add(new ConfiguredNode(i, true)); configuredNodes.add(new ConfiguredNode(5, false)); configuredNodes.add(new ConfiguredNode(6, false)); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs; this.options.maxInitProgressTime = 30000; this.options.stableStateTimePeriod = 60000; fleetController.updateOptions(options); waitForState("version:\\d+ distributor:7 storage:7 .0.s:r .1.s:r .2.s:r .3.s:r .4.s:r"); } { /* TODO: Verify current result: version:23 distributor:7 .0.s:d .1.s:d .2.s:d .3.s:d .4.s:d storage:7 .0.s:m .1.s:m .2.s:m .3.s:m .4.s:m TODO: Make this work without stopping/disconnecting (see StateChangeHandler.setNodes Set<ConfiguredNode> configuredNodes = new TreeSet<>(); configuredNodes.add(new ConfiguredNode(5, false)); configuredNodes.add(new ConfiguredNode(6, false)); FleetControllerOptions options = new FleetControllerOptions("mycluster", configuredNodes); options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs; this.options.maxInitProgressTimeMs = 30000; this.options.stableStateTimePeriod = 60000; fleetController.updateOptions(options, 0); for (int i = 0; i < 5*2; i++) { nodes.get(i).disconnectSlobrok(); nodes.get(i).disconnect(); } waitForState("version:\\d+ distributor:7 storage:7 .0.s:d .1.s:d .2.s:d .3.s:d .4.s:d"); */ } } @Test void testSetNodeState() throws Exception { startingTest("RpcServerTest::testSetNodeState"); Set<Integer> nodeIndexes = new TreeSet<>(List.of(4, 6, 9, 10, 14, 16, 21, 22, 23, 25)); Set<ConfiguredNode> configuredNodes = nodeIndexes.stream().map(i -> new ConfiguredNode(i, false)).collect(Collectors.toSet()); FleetControllerOptions options = defaultOptions("mycluster", configuredNodes); setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions(), false, nodeIndexes); waitForState("version:\\d+ distributor:26 .0.s:d .1.s:d .2.s:d .3.s:d .5.s:d .7.s:d .8.s:d .11.s:d 
.12.s:d .13.s:d .15.s:d .17.s:d .18.s:d .19.s:d .20.s:d .24.s:d storage:26 .0.s:d .1.s:d .2.s:d .3.s:d .5.s:d .7.s:d .8.s:d .11.s:d .12.s:d .13.s:d .15.s:d .17.s:d .18.s:d .19.s:d .20.s:d .24.s:d"); int rpcPort = fleetController.getRpcPort(); supervisor = new Supervisor(new Transport()); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); Request req = setNodeState("storage/cluster.mycluster/storage/14", "s:r", connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("s"), req.toString()); waitForState("version:\\d+ distributor:26 .* storage:26 .* .14.s:r .*"); req = setNodeState("storage/cluster.mycluster/storage/16", "s:m", connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); assertTrue(req.checkReturnTypes("s"), req.toString()); waitForState("version:\\d+ distributor:26 .* storage:26 .* .14.s:r.* .16.s:m .*"); nodes.get(5 * 2 + 1).disconnect(); waitForCompleteCycle(); timer.advanceTime(100000000); waitForCompleteCycle(); assertEquals(State.MAINTENANCE, fleetController.getSystemState().getNodeState(new Node(NodeType.STORAGE, 16)).getState()); nodes.get(4 * 2 + 1).disconnect(); waitForState("version:\\d+ distributor:26 .* storage:26 .* .14.s:m.* .16.s:m .*"); nodes.get(4 * 2 + 1).connect(); timer.advanceTime(100000000); waitForState("version:\\d+ distributor:26 .* storage:26 .* .14.s:r.* .16.s:m .*"); } @Test void testSetNodeStateOutOfRange() throws Exception { startingTest("RpcServerTest::testSetNodeStateOutOfRange"); FleetControllerOptions options = defaultOptions("mycluster"); options.setStorageDistribution(new Distribution(Distribution.getDefaultDistributionConfig(2, 10))); setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions()); waitForStableSystem(); int rpcPort = fleetController.getRpcPort(); supervisor = new Supervisor(new Transport()); Target connection = supervisor.connect(new Spec("localhost", 
rpcPort)); assertTrue(connection.isValid()); Request req = setNodeState("storage/cluster.mycluster/storage/10", "s:m", connection); assertEquals(ErrorCode.METHOD_FAILED, req.errorCode(), req.toString()); assertEquals("Cannot set wanted state of node storage.10. Index does not correspond to a configured node.", req.errorMessage(), req.toString()); req = setNodeState("storage/cluster.mycluster/distributor/10", "s:m", connection); assertEquals(ErrorCode.METHOD_FAILED, req.errorCode(), req.toString()); assertEquals("Cannot set wanted state of node distributor.10. Index does not correspond to a configured node.", req.errorMessage(), req.toString()); req = setNodeState("storage/cluster.mycluster/storage/9", "s:m", connection); assertEquals(ErrorCode.NONE, req.errorCode(), req.toString()); waitForState("version:\\d+ distributor:10 storage:10 .9.s:m"); } @Test void testGetMaster() throws Exception { startingTest("RpcServerTest::testGetMaster"); FleetControllerOptions options = defaultOptions("mycluster"); options.setStorageDistribution(new Distribution(Distribution.getDefaultDistributionConfig(2, 10))); setUpFleetController(true, options); setUpVdsNodes(true, new DummyVdsNodeOptions()); waitForStableSystem(); int rpcPort = fleetController.getRpcPort(); supervisor = new Supervisor(new Transport()); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); Request req = new Request("getMaster"); connection.invokeSync(req, timeoutInSeconds()); assertEquals(0, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("All 1 nodes agree that 0 is current master.", req.returnValues().get(1).asString(), req.toString()); } @Test void testGetNodeList() throws Exception { startingTest("RpcServerTest::testGetNodeList"); setUpFleetController(true, defaultOptions("mycluster", 5)); final int nodeCount = 5; setUpVdsNodes(true, new DummyVdsNodeOptions(), false, nodeCount); waitForStableSystem(); 
assertTrue(nodes.get(0).isDistributor()); nodes.get(0).disconnect(); waitForState("version:\\d+ distributor:5 .0.s:d storage:5"); int rpcPort = fleetController.getRpcPort(); supervisor = new Supervisor(new Transport()); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); Request req = new Request("getNodeList"); connection.invokeSync(req, timeoutInSeconds()); assertEquals(ErrorCode.NONE, req.errorCode(), req.errorMessage()); assertTrue(req.checkReturnTypes("SS"), req.toString()); String[] slobrok = req.returnValues().get(0).asStringArray().clone(); String[] rpc = req.returnValues().get(1).asStringArray().clone(); assertEquals(2 * nodeCount, slobrok.length); assertEquals(2 * nodeCount, rpc.length); for (int i = 0; i < 2 * nodeCount; ++i) { if (slobrok[i].equals("storage/cluster.mycluster/distributor/0")) { if (i < nodeCount && !"".equals(rpc[i])) { continue; } assertEquals("", rpc[i], slobrok[i]); continue; } assertNotEquals("", rpc[i]); Request req2 = new Request("getnodestate2"); req2.parameters().add(new StringValue("unknown")); Target connection2 = supervisor.connect(new Spec(rpc[i])); connection2.invokeSync(req2, timeoutInSeconds()); assertEquals(ErrorCode.NONE, req.errorCode(), req2.toString()); } } private Request setNodeState(String node, NodeState newNodeState, Target connection) { return setNodeState(node, newNodeState.serialize(true), connection); } private Request setNodeState(String node, String newNodeState, Target connection) { Request req = new Request("setNodeState"); req.parameters().add(new StringValue(node)); req.parameters().add(new StringValue(newNodeState)); connection.invokeSync(req, timeoutInSeconds()); return req; } private Request getNodeState(String nodeType, int nodeIndex, Target connection) { Request req = new Request("getNodeState"); req.parameters().add(new StringValue(nodeType)); req.parameters().add(new Int32Value(nodeIndex)); connection.invokeSync(req, timeoutInSeconds()); 
return req; } }
double timeoutInSeconds() ?
void setWantedState(DummyVdsNode node, State state, String reason) { if (supervisor == null) { supervisor = new Supervisor(new Transport()); } NodeState ns = new NodeState(node.getType(), state); if (reason != null) ns.setDescription(reason); Target connection = supervisor.connect(new Spec("localhost", fleetController.getRpcPort())); Request req = new Request("setNodeState"); req.parameters().add(new StringValue(node.getSlobrokName())); req.parameters().add(new StringValue(ns.serialize())); connection.invokeSync(req, timeout.getSeconds()); if (req.isError()) { fail("Failed to invoke setNodeState(): " + req.errorCode() + ": " + req.errorMessage()); } if (!req.checkReturnTypes("s")) { fail("Failed to invoke setNodeState(): Invalid return types."); } }
connection.invokeSync(req, timeout.getSeconds());
void setWantedState(DummyVdsNode node, State state, String reason) { if (supervisor == null) { supervisor = new Supervisor(new Transport()); } NodeState ns = new NodeState(node.getType(), state); if (reason != null) ns.setDescription(reason); Target connection = supervisor.connect(new Spec("localhost", fleetController.getRpcPort())); Request req = new Request("setNodeState"); req.parameters().add(new StringValue(node.getSlobrokName())); req.parameters().add(new StringValue(ns.serialize())); connection.invokeSync(req, timeoutInSeconds()); if (req.isError()) { fail("Failed to invoke setNodeState(): " + req.errorCode() + ": " + req.errorMessage()); } if (!req.checkReturnTypes("s")) { fail("Failed to invoke setNodeState(): Invalid return types."); } }
class ExpectLine { Pattern regex; int matchedCount = 0; int minCount = 1; int maxCount = 1; boolean repeatable() { return (maxCount == 0 || maxCount > matchedCount); } boolean optional() { return (matchedCount >= minCount); } boolean matches(String event) { if (event == null) return false; boolean m = regex.matcher(event).matches(); if (m) ++matchedCount; return m; } ExpectLine(String pattern) { if (pattern.charAt(0) == '?') { pattern = pattern.substring(1); minCount = 0; } else if (pattern.charAt(0) == '*') { pattern = pattern.substring(1); minCount = 0; maxCount = 0; } else if (pattern.charAt(0) == '+') { pattern = pattern.substring(1); maxCount = 0; } regex = Pattern.compile(pattern); } public String toString() { return "{"+minCount+","+maxCount+"}("+matchedCount+") " + regex; } }
class ExpectLine { Pattern regex; int matchedCount = 0; int minCount = 1; int maxCount = 1; boolean repeatable() { return (maxCount == 0 || maxCount > matchedCount); } boolean optional() { return (matchedCount >= minCount); } boolean matches(String event) { if (event == null) return false; boolean m = regex.matcher(event).matches(); if (m) ++matchedCount; return m; } ExpectLine(String pattern) { if (pattern.charAt(0) == '?') { pattern = pattern.substring(1); minCount = 0; } else if (pattern.charAt(0) == '*') { pattern = pattern.substring(1); minCount = 0; maxCount = 0; } else if (pattern.charAt(0) == '+') { pattern = pattern.substring(1); maxCount = 0; } regex = Pattern.compile(pattern); } public String toString() { return "{"+minCount+","+maxCount+"}("+matchedCount+") " + regex; } }
double timeoutInSeconds() ?
private void waitForMasterReason(String reason, Integer master, List<Target> connections, int[] nodes) { Instant endTime = Instant.now().plus(timeout()); while (Instant.now().isBefore(endTime)) { boolean allOk = true; for (int node : nodes) { Request req = new Request("getMaster"); connections.get(node).invokeSync(req, timeout().getSeconds()); if (req.isError()) { allOk = false; break; } if (master != null && master != req.returnValues().get(0).asInt32()) { allOk = false; break; } if (reason != null && ! reason.equals(req.returnValues().get(1).asString())) { allOk = false; break; } } if (allOk) return; try{ Thread.sleep(100); } catch (InterruptedException e) { /* ignore */ } } throw new IllegalStateException("Did not get master reason '" + reason + "' within timeout of " + timeout()); }
connections.get(node).invokeSync(req, timeout().getSeconds());
private void waitForMasterReason(String reason, Integer master, List<Target> connections, int[] nodes) { Instant endTime = Instant.now().plus(timeout()); while (Instant.now().isBefore(endTime)) { boolean allOk = true; for (int node : nodes) { Request req = new Request("getMaster"); connections.get(node).invokeSync(req, timeoutInSeconds()); if (req.isError()) { allOk = false; break; } if (master != null && master != req.returnValues().get(0).asInt32()) { allOk = false; break; } if (reason != null && ! reason.equals(req.returnValues().get(1).asString())) { allOk = false; break; } } if (allOk) return; try{ Thread.sleep(100); } catch (InterruptedException e) { /* ignore */ } } throw new IllegalStateException("Did not get master reason '" + reason + "' within timeout of " + timeout()); }
class StrictlyIncreasingVersionChecker { private ClusterState lastState; private StrictlyIncreasingVersionChecker(ClusterState initialState) { this.lastState = initialState; } static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) { return new StrictlyIncreasingVersionChecker(initialState); } void updateAndVerify(ClusterState currentState) { final ClusterState last = lastState; lastState = currentState; if (currentState.getVersion() <= last.getVersion()) { throw new IllegalStateException( String.format("Cluster state version strict increase invariant broken! " + "Old state was '%s', new state is '%s'", last, currentState)); } } }
class StrictlyIncreasingVersionChecker { private ClusterState lastState; private StrictlyIncreasingVersionChecker(ClusterState initialState) { this.lastState = initialState; } static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) { return new StrictlyIncreasingVersionChecker(initialState); } void updateAndVerify(ClusterState currentState) { final ClusterState last = lastState; lastState = currentState; if (currentState.getVersion() <= last.getVersion()) { throw new IllegalStateException( String.format("Cluster state version strict increase invariant broken! " + "Old state was '%s', new state is '%s'", last, currentState)); } } }
double timeoutInSeconds() ?
void testGetMaster() throws Exception { startingTest("MasterElectionTest::testGetMaster"); FleetControllerOptions options = defaultOptions("mycluster"); options.masterZooKeeperCooldownPeriod = 3600 * 1000; setUpFleetController(3, true, options); waitForMaster(0); supervisor = new Supervisor(new Transport()); List<Target> connections = new ArrayList<>(); for (FleetController fleetController : fleetControllers) { int rpcPort = fleetController.getRpcPort(); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); connections.add(connection); } timer.advanceTime(24 * 3600 * 1000); waitForCompleteCycles(); Request req = new Request("getMaster"); for (int nodeIndex = 0; nodeIndex < 3; ++nodeIndex) { for (int retry = 0; retry < timeout().getSeconds() * 10; ++retry) { req = new Request("getMaster"); connections.get(nodeIndex).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() == 0 && req.returnValues().get(1).asString().equals("All 3 nodes agree that 0 is current master.")) { break; } } assertEquals(0, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("All 3 nodes agree that 0 is current master.", req.returnValues().get(1).asString(), req.toString()); } log.log(Level.INFO, "SHUTTING DOWN FLEET CONTROLLER 0"); fleetControllers.get(0).shutdown(); waitForCompleteCycle(1); waitForCompleteCycle(2); timer.advanceTime(300 * 1000); int[] remainingNodes = {1, 2}; waitForMasterReason( "2 of 3 nodes agree 1 should be master, but old master cooldown period of 3600000 ms has not passed yet. 
To ensure it has got time to realize it is no longer master before we elect a new one, currently there is no master.", -1, connections, remainingNodes); assertFalse(fleetControllers.get(1).isMaster()); timer.advanceTime(3600 * 1000); waitForMaster(1); req = new Request("getMaster"); connections.get(0).invokeSync(req, timeoutInSeconds()); assertEquals(104, req.errorCode(), req.toString()); assertEquals("Connection error", req.errorMessage(), req.toString()); for (int i = 0; i < timeout().getSeconds() * 10; ++i) { req = new Request("getMaster"); connections.get(1).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() != -1) break; } assertEquals(1, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString()); for (int i = 0; i < timeout().getSeconds() * 10; ++i) { req = new Request("getMaster"); connections.get(2).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() != -1) break; } assertEquals(1, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString()); }
for (int retry = 0; retry < timeout().getSeconds() * 10; ++retry) {
void testGetMaster() throws Exception { startingTest("MasterElectionTest::testGetMaster"); FleetControllerOptions options = defaultOptions("mycluster"); options.masterZooKeeperCooldownPeriod = 3600 * 1000; setUpFleetController(3, true, options); waitForMaster(0); supervisor = new Supervisor(new Transport()); List<Target> connections = new ArrayList<>(); for (FleetController fleetController : fleetControllers) { int rpcPort = fleetController.getRpcPort(); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); connections.add(connection); } timer.advanceTime(24 * 3600 * 1000); waitForCompleteCycles(); Request req = new Request("getMaster"); for (int nodeIndex = 0; nodeIndex < 3; ++nodeIndex) { for (int retry = 0; retry < timeoutInSeconds() * 10; ++retry) { req = new Request("getMaster"); connections.get(nodeIndex).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() == 0 && req.returnValues().get(1).asString().equals("All 3 nodes agree that 0 is current master.")) { break; } } assertEquals(0, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("All 3 nodes agree that 0 is current master.", req.returnValues().get(1).asString(), req.toString()); } log.log(Level.INFO, "SHUTTING DOWN FLEET CONTROLLER 0"); fleetControllers.get(0).shutdown(); waitForCompleteCycle(1); waitForCompleteCycle(2); timer.advanceTime(300 * 1000); int[] remainingNodes = {1, 2}; waitForMasterReason( "2 of 3 nodes agree 1 should be master, but old master cooldown period of 3600000 ms has not passed yet. 
To ensure it has got time to realize it is no longer master before we elect a new one, currently there is no master.", -1, connections, remainingNodes); assertFalse(fleetControllers.get(1).isMaster()); timer.advanceTime(3600 * 1000); waitForMaster(1); req = new Request("getMaster"); connections.get(0).invokeSync(req, timeoutInSeconds()); assertEquals(104, req.errorCode(), req.toString()); assertEquals("Connection error", req.errorMessage(), req.toString()); for (int i = 0; i < timeoutInSeconds() * 10; ++i) { req = new Request("getMaster"); connections.get(1).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() != -1) break; } assertEquals(1, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString()); for (int i = 0; i < timeoutInSeconds() * 10; ++i) { req = new Request("getMaster"); connections.get(2).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() != -1) break; } assertEquals(1, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString()); }
class StrictlyIncreasingVersionChecker { private ClusterState lastState; private StrictlyIncreasingVersionChecker(ClusterState initialState) { this.lastState = initialState; } static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) { return new StrictlyIncreasingVersionChecker(initialState); } void updateAndVerify(ClusterState currentState) { final ClusterState last = lastState; lastState = currentState; if (currentState.getVersion() <= last.getVersion()) { throw new IllegalStateException( String.format("Cluster state version strict increase invariant broken! " + "Old state was '%s', new state is '%s'", last, currentState)); } } }
class StrictlyIncreasingVersionChecker { private ClusterState lastState; private StrictlyIncreasingVersionChecker(ClusterState initialState) { this.lastState = initialState; } static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) { return new StrictlyIncreasingVersionChecker(initialState); } void updateAndVerify(ClusterState currentState) { final ClusterState last = lastState; lastState = currentState; if (currentState.getVersion() <= last.getVersion()) { throw new IllegalStateException( String.format("Cluster state version strict increase invariant broken! " + "Old state was '%s', new state is '%s'", last, currentState)); } } }
double timeoutInSeconds() ?
void testGetMaster() throws Exception { startingTest("MasterElectionTest::testGetMaster"); FleetControllerOptions options = defaultOptions("mycluster"); options.masterZooKeeperCooldownPeriod = 3600 * 1000; setUpFleetController(3, true, options); waitForMaster(0); supervisor = new Supervisor(new Transport()); List<Target> connections = new ArrayList<>(); for (FleetController fleetController : fleetControllers) { int rpcPort = fleetController.getRpcPort(); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); connections.add(connection); } timer.advanceTime(24 * 3600 * 1000); waitForCompleteCycles(); Request req = new Request("getMaster"); for (int nodeIndex = 0; nodeIndex < 3; ++nodeIndex) { for (int retry = 0; retry < timeout().getSeconds() * 10; ++retry) { req = new Request("getMaster"); connections.get(nodeIndex).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() == 0 && req.returnValues().get(1).asString().equals("All 3 nodes agree that 0 is current master.")) { break; } } assertEquals(0, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("All 3 nodes agree that 0 is current master.", req.returnValues().get(1).asString(), req.toString()); } log.log(Level.INFO, "SHUTTING DOWN FLEET CONTROLLER 0"); fleetControllers.get(0).shutdown(); waitForCompleteCycle(1); waitForCompleteCycle(2); timer.advanceTime(300 * 1000); int[] remainingNodes = {1, 2}; waitForMasterReason( "2 of 3 nodes agree 1 should be master, but old master cooldown period of 3600000 ms has not passed yet. 
To ensure it has got time to realize it is no longer master before we elect a new one, currently there is no master.", -1, connections, remainingNodes); assertFalse(fleetControllers.get(1).isMaster()); timer.advanceTime(3600 * 1000); waitForMaster(1); req = new Request("getMaster"); connections.get(0).invokeSync(req, timeoutInSeconds()); assertEquals(104, req.errorCode(), req.toString()); assertEquals("Connection error", req.errorMessage(), req.toString()); for (int i = 0; i < timeout().getSeconds() * 10; ++i) { req = new Request("getMaster"); connections.get(1).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() != -1) break; } assertEquals(1, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString()); for (int i = 0; i < timeout().getSeconds() * 10; ++i) { req = new Request("getMaster"); connections.get(2).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() != -1) break; } assertEquals(1, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString()); }
for (int i = 0; i < timeout().getSeconds() * 10; ++i) {
void testGetMaster() throws Exception { startingTest("MasterElectionTest::testGetMaster"); FleetControllerOptions options = defaultOptions("mycluster"); options.masterZooKeeperCooldownPeriod = 3600 * 1000; setUpFleetController(3, true, options); waitForMaster(0); supervisor = new Supervisor(new Transport()); List<Target> connections = new ArrayList<>(); for (FleetController fleetController : fleetControllers) { int rpcPort = fleetController.getRpcPort(); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); connections.add(connection); } timer.advanceTime(24 * 3600 * 1000); waitForCompleteCycles(); Request req = new Request("getMaster"); for (int nodeIndex = 0; nodeIndex < 3; ++nodeIndex) { for (int retry = 0; retry < timeoutInSeconds() * 10; ++retry) { req = new Request("getMaster"); connections.get(nodeIndex).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() == 0 && req.returnValues().get(1).asString().equals("All 3 nodes agree that 0 is current master.")) { break; } } assertEquals(0, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("All 3 nodes agree that 0 is current master.", req.returnValues().get(1).asString(), req.toString()); } log.log(Level.INFO, "SHUTTING DOWN FLEET CONTROLLER 0"); fleetControllers.get(0).shutdown(); waitForCompleteCycle(1); waitForCompleteCycle(2); timer.advanceTime(300 * 1000); int[] remainingNodes = {1, 2}; waitForMasterReason( "2 of 3 nodes agree 1 should be master, but old master cooldown period of 3600000 ms has not passed yet. 
To ensure it has got time to realize it is no longer master before we elect a new one, currently there is no master.", -1, connections, remainingNodes); assertFalse(fleetControllers.get(1).isMaster()); timer.advanceTime(3600 * 1000); waitForMaster(1); req = new Request("getMaster"); connections.get(0).invokeSync(req, timeoutInSeconds()); assertEquals(104, req.errorCode(), req.toString()); assertEquals("Connection error", req.errorMessage(), req.toString()); for (int i = 0; i < timeoutInSeconds() * 10; ++i) { req = new Request("getMaster"); connections.get(1).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() != -1) break; } assertEquals(1, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString()); for (int i = 0; i < timeoutInSeconds() * 10; ++i) { req = new Request("getMaster"); connections.get(2).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() != -1) break; } assertEquals(1, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString()); }
class StrictlyIncreasingVersionChecker { private ClusterState lastState; private StrictlyIncreasingVersionChecker(ClusterState initialState) { this.lastState = initialState; } static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) { return new StrictlyIncreasingVersionChecker(initialState); } void updateAndVerify(ClusterState currentState) { final ClusterState last = lastState; lastState = currentState; if (currentState.getVersion() <= last.getVersion()) { throw new IllegalStateException( String.format("Cluster state version strict increase invariant broken! " + "Old state was '%s', new state is '%s'", last, currentState)); } } }
class StrictlyIncreasingVersionChecker { private ClusterState lastState; private StrictlyIncreasingVersionChecker(ClusterState initialState) { this.lastState = initialState; } static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) { return new StrictlyIncreasingVersionChecker(initialState); } void updateAndVerify(ClusterState currentState) { final ClusterState last = lastState; lastState = currentState; if (currentState.getVersion() <= last.getVersion()) { throw new IllegalStateException( String.format("Cluster state version strict increase invariant broken! " + "Old state was '%s', new state is '%s'", last, currentState)); } } }
double timeoutInSeconds() ?
void testGetMaster() throws Exception { startingTest("MasterElectionTest::testGetMaster"); FleetControllerOptions options = defaultOptions("mycluster"); options.masterZooKeeperCooldownPeriod = 3600 * 1000; setUpFleetController(3, true, options); waitForMaster(0); supervisor = new Supervisor(new Transport()); List<Target> connections = new ArrayList<>(); for (FleetController fleetController : fleetControllers) { int rpcPort = fleetController.getRpcPort(); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); connections.add(connection); } timer.advanceTime(24 * 3600 * 1000); waitForCompleteCycles(); Request req = new Request("getMaster"); for (int nodeIndex = 0; nodeIndex < 3; ++nodeIndex) { for (int retry = 0; retry < timeout().getSeconds() * 10; ++retry) { req = new Request("getMaster"); connections.get(nodeIndex).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() == 0 && req.returnValues().get(1).asString().equals("All 3 nodes agree that 0 is current master.")) { break; } } assertEquals(0, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("All 3 nodes agree that 0 is current master.", req.returnValues().get(1).asString(), req.toString()); } log.log(Level.INFO, "SHUTTING DOWN FLEET CONTROLLER 0"); fleetControllers.get(0).shutdown(); waitForCompleteCycle(1); waitForCompleteCycle(2); timer.advanceTime(300 * 1000); int[] remainingNodes = {1, 2}; waitForMasterReason( "2 of 3 nodes agree 1 should be master, but old master cooldown period of 3600000 ms has not passed yet. 
To ensure it has got time to realize it is no longer master before we elect a new one, currently there is no master.", -1, connections, remainingNodes); assertFalse(fleetControllers.get(1).isMaster()); timer.advanceTime(3600 * 1000); waitForMaster(1); req = new Request("getMaster"); connections.get(0).invokeSync(req, timeoutInSeconds()); assertEquals(104, req.errorCode(), req.toString()); assertEquals("Connection error", req.errorMessage(), req.toString()); for (int i = 0; i < timeout().getSeconds() * 10; ++i) { req = new Request("getMaster"); connections.get(1).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() != -1) break; } assertEquals(1, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString()); for (int i = 0; i < timeout().getSeconds() * 10; ++i) { req = new Request("getMaster"); connections.get(2).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() != -1) break; } assertEquals(1, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString()); }
for (int i = 0; i < timeout().getSeconds() * 10; ++i) {
void testGetMaster() throws Exception { startingTest("MasterElectionTest::testGetMaster"); FleetControllerOptions options = defaultOptions("mycluster"); options.masterZooKeeperCooldownPeriod = 3600 * 1000; setUpFleetController(3, true, options); waitForMaster(0); supervisor = new Supervisor(new Transport()); List<Target> connections = new ArrayList<>(); for (FleetController fleetController : fleetControllers) { int rpcPort = fleetController.getRpcPort(); Target connection = supervisor.connect(new Spec("localhost", rpcPort)); assertTrue(connection.isValid()); connections.add(connection); } timer.advanceTime(24 * 3600 * 1000); waitForCompleteCycles(); Request req = new Request("getMaster"); for (int nodeIndex = 0; nodeIndex < 3; ++nodeIndex) { for (int retry = 0; retry < timeoutInSeconds() * 10; ++retry) { req = new Request("getMaster"); connections.get(nodeIndex).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() == 0 && req.returnValues().get(1).asString().equals("All 3 nodes agree that 0 is current master.")) { break; } } assertEquals(0, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("All 3 nodes agree that 0 is current master.", req.returnValues().get(1).asString(), req.toString()); } log.log(Level.INFO, "SHUTTING DOWN FLEET CONTROLLER 0"); fleetControllers.get(0).shutdown(); waitForCompleteCycle(1); waitForCompleteCycle(2); timer.advanceTime(300 * 1000); int[] remainingNodes = {1, 2}; waitForMasterReason( "2 of 3 nodes agree 1 should be master, but old master cooldown period of 3600000 ms has not passed yet. 
To ensure it has got time to realize it is no longer master before we elect a new one, currently there is no master.", -1, connections, remainingNodes); assertFalse(fleetControllers.get(1).isMaster()); timer.advanceTime(3600 * 1000); waitForMaster(1); req = new Request("getMaster"); connections.get(0).invokeSync(req, timeoutInSeconds()); assertEquals(104, req.errorCode(), req.toString()); assertEquals("Connection error", req.errorMessage(), req.toString()); for (int i = 0; i < timeoutInSeconds() * 10; ++i) { req = new Request("getMaster"); connections.get(1).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() != -1) break; } assertEquals(1, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString()); for (int i = 0; i < timeoutInSeconds() * 10; ++i) { req = new Request("getMaster"); connections.get(2).invokeSync(req, timeoutInSeconds()); assertFalse(req.isError(), req.errorMessage()); if (req.returnValues().get(0).asInt32() != -1) break; } assertEquals(1, req.returnValues().get(0).asInt32(), req.toString()); assertEquals("2 of 3 nodes agree 1 is master.", req.returnValues().get(1).asString(), req.toString()); }
class StrictlyIncreasingVersionChecker { private ClusterState lastState; private StrictlyIncreasingVersionChecker(ClusterState initialState) { this.lastState = initialState; } static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) { return new StrictlyIncreasingVersionChecker(initialState); } void updateAndVerify(ClusterState currentState) { final ClusterState last = lastState; lastState = currentState; if (currentState.getVersion() <= last.getVersion()) { throw new IllegalStateException( String.format("Cluster state version strict increase invariant broken! " + "Old state was '%s', new state is '%s'", last, currentState)); } } }
class StrictlyIncreasingVersionChecker { private ClusterState lastState; private StrictlyIncreasingVersionChecker(ClusterState initialState) { this.lastState = initialState; } static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) { return new StrictlyIncreasingVersionChecker(initialState); } void updateAndVerify(ClusterState currentState) { final ClusterState last = lastState; lastState = currentState; if (currentState.getVersion() <= last.getVersion()) { throw new IllegalStateException( String.format("Cluster state version strict increase invariant broken! " + "Old state was '%s', new state is '%s'", last, currentState)); } } }
Ideally we should change the type of the `BiMap` to also be `SimplePrincipal`, if someone changes deserialization code, this could stop working again?
public Cloud withDeveloperKey(PublicKey key, Principal principal) { BiMap<PublicKey, Principal> keys = HashBiMap.create(developerKeys); principal = new SimplePrincipal(principal.getName()); if (keys.containsKey(key)) throw new IllegalArgumentException("Key " + KeyUtils.toPem(key) + " is already owned by " + keys.get(key)); if (keys.inverse().containsKey(principal)) throw new IllegalArgumentException(principal + " is already associated with key " + KeyUtils.toPem(keys.inverse().get(principal))); keys.put(key, principal); return new Cloud(name, createdAt, lastLoginInfo, creator, keys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); }
principal = new SimplePrincipal(principal.getName());
public Cloud withDeveloperKey(PublicKey key, Principal principal) { BiMap<PublicKey, Principal> keys = HashBiMap.create(developerKeys); principal = new SimplePrincipal(principal.getName()); if (keys.containsKey(key)) throw new IllegalArgumentException("Key " + KeyUtils.toPem(key) + " is already owned by " + keys.get(key)); if (keys.inverse().containsKey(principal)) throw new IllegalArgumentException(principal + " is already associated with key " + KeyUtils.toPem(keys.inverse().get(principal))); keys.put(key, principal); return new Cloud(name, createdAt, lastLoginInfo, creator, keys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); }
class Cloud extends LockedTenant { private final Optional<Principal> creator; private final BiMap<PublicKey, Principal> developerKeys; private final TenantInfo info; private final List<TenantSecretStore> tenantSecretStores; private final ArchiveAccess archiveAccess; private final Optional<Instant> invalidateUserSessionsBefore; private Cloud(TenantName name, Instant createdAt, LastLoginInfo lastLoginInfo, Optional<Principal> creator, BiMap<PublicKey, Principal> developerKeys, TenantInfo info, List<TenantSecretStore> tenantSecretStores, ArchiveAccess archiveAccess, Optional<Instant> invalidateUserSessionsBefore) { super(name, createdAt, lastLoginInfo); this.developerKeys = ImmutableBiMap.copyOf(developerKeys); this.creator = creator; this.info = info; this.tenantSecretStores = tenantSecretStores; this.archiveAccess = archiveAccess; this.invalidateUserSessionsBefore = invalidateUserSessionsBefore; } private Cloud(CloudTenant tenant) { this(tenant.name(), tenant.createdAt(), tenant.lastLoginInfo(), tenant.creator(), tenant.developerKeys(), tenant.info(), tenant.tenantSecretStores(), tenant.archiveAccess(), tenant.invalidateUserSessionsBefore()); } @Override public CloudTenant get() { return new CloudTenant(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withoutDeveloperKey(PublicKey key) { BiMap<PublicKey, Principal> keys = HashBiMap.create(developerKeys); keys.remove(key); return new Cloud(name, createdAt, lastLoginInfo, creator, keys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withInfo(TenantInfo newInfo) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, newInfo, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } @Override public LockedTenant with(LastLoginInfo lastLoginInfo) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, 
invalidateUserSessionsBefore); } public Cloud withSecretStore(TenantSecretStore tenantSecretStore) { ArrayList<TenantSecretStore> secretStores = new ArrayList<>(tenantSecretStores); secretStores.add(tenantSecretStore); return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, secretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withoutSecretStore(TenantSecretStore tenantSecretStore) { ArrayList<TenantSecretStore> secretStores = new ArrayList<>(tenantSecretStores); secretStores.remove(tenantSecretStore); return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, secretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withArchiveAccess(ArchiveAccess archiveAccess) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withInvalidateUserSessionsBefore(Instant invalidateUserSessionsBefore) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, Optional.of(invalidateUserSessionsBefore)); } }
class Cloud extends LockedTenant { private final Optional<Principal> creator; private final BiMap<PublicKey, Principal> developerKeys; private final TenantInfo info; private final List<TenantSecretStore> tenantSecretStores; private final ArchiveAccess archiveAccess; private final Optional<Instant> invalidateUserSessionsBefore; private Cloud(TenantName name, Instant createdAt, LastLoginInfo lastLoginInfo, Optional<Principal> creator, BiMap<PublicKey, Principal> developerKeys, TenantInfo info, List<TenantSecretStore> tenantSecretStores, ArchiveAccess archiveAccess, Optional<Instant> invalidateUserSessionsBefore) { super(name, createdAt, lastLoginInfo); this.developerKeys = ImmutableBiMap.copyOf(developerKeys); this.creator = creator; this.info = info; this.tenantSecretStores = tenantSecretStores; this.archiveAccess = archiveAccess; this.invalidateUserSessionsBefore = invalidateUserSessionsBefore; } private Cloud(CloudTenant tenant) { this(tenant.name(), tenant.createdAt(), tenant.lastLoginInfo(), tenant.creator(), tenant.developerKeys(), tenant.info(), tenant.tenantSecretStores(), tenant.archiveAccess(), tenant.invalidateUserSessionsBefore()); } @Override public CloudTenant get() { return new CloudTenant(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withoutDeveloperKey(PublicKey key) { BiMap<PublicKey, Principal> keys = HashBiMap.create(developerKeys); keys.remove(key); return new Cloud(name, createdAt, lastLoginInfo, creator, keys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withInfo(TenantInfo newInfo) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, newInfo, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } @Override public LockedTenant with(LastLoginInfo lastLoginInfo) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, 
invalidateUserSessionsBefore); } public Cloud withSecretStore(TenantSecretStore tenantSecretStore) { ArrayList<TenantSecretStore> secretStores = new ArrayList<>(tenantSecretStores); secretStores.add(tenantSecretStore); return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, secretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withoutSecretStore(TenantSecretStore tenantSecretStore) { ArrayList<TenantSecretStore> secretStores = new ArrayList<>(tenantSecretStores); secretStores.remove(tenantSecretStore); return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, secretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withArchiveAccess(ArchiveAccess archiveAccess) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withInvalidateUserSessionsBefore(Instant invalidateUserSessionsBefore) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, Optional.of(invalidateUserSessionsBefore)); } }
Consider using `SimplePrincipal` in the map as well so that the equality contract is obvious.
public Cloud withDeveloperKey(PublicKey key, Principal principal) { BiMap<PublicKey, Principal> keys = HashBiMap.create(developerKeys); principal = new SimplePrincipal(principal.getName()); if (keys.containsKey(key)) throw new IllegalArgumentException("Key " + KeyUtils.toPem(key) + " is already owned by " + keys.get(key)); if (keys.inverse().containsKey(principal)) throw new IllegalArgumentException(principal + " is already associated with key " + KeyUtils.toPem(keys.inverse().get(principal))); keys.put(key, principal); return new Cloud(name, createdAt, lastLoginInfo, creator, keys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); }
BiMap<PublicKey, Principal> keys = HashBiMap.create(developerKeys);
public Cloud withDeveloperKey(PublicKey key, Principal principal) { BiMap<PublicKey, Principal> keys = HashBiMap.create(developerKeys); principal = new SimplePrincipal(principal.getName()); if (keys.containsKey(key)) throw new IllegalArgumentException("Key " + KeyUtils.toPem(key) + " is already owned by " + keys.get(key)); if (keys.inverse().containsKey(principal)) throw new IllegalArgumentException(principal + " is already associated with key " + KeyUtils.toPem(keys.inverse().get(principal))); keys.put(key, principal); return new Cloud(name, createdAt, lastLoginInfo, creator, keys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); }
class Cloud extends LockedTenant { private final Optional<Principal> creator; private final BiMap<PublicKey, Principal> developerKeys; private final TenantInfo info; private final List<TenantSecretStore> tenantSecretStores; private final ArchiveAccess archiveAccess; private final Optional<Instant> invalidateUserSessionsBefore; private Cloud(TenantName name, Instant createdAt, LastLoginInfo lastLoginInfo, Optional<Principal> creator, BiMap<PublicKey, Principal> developerKeys, TenantInfo info, List<TenantSecretStore> tenantSecretStores, ArchiveAccess archiveAccess, Optional<Instant> invalidateUserSessionsBefore) { super(name, createdAt, lastLoginInfo); this.developerKeys = ImmutableBiMap.copyOf(developerKeys); this.creator = creator; this.info = info; this.tenantSecretStores = tenantSecretStores; this.archiveAccess = archiveAccess; this.invalidateUserSessionsBefore = invalidateUserSessionsBefore; } private Cloud(CloudTenant tenant) { this(tenant.name(), tenant.createdAt(), tenant.lastLoginInfo(), tenant.creator(), tenant.developerKeys(), tenant.info(), tenant.tenantSecretStores(), tenant.archiveAccess(), tenant.invalidateUserSessionsBefore()); } @Override public CloudTenant get() { return new CloudTenant(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withoutDeveloperKey(PublicKey key) { BiMap<PublicKey, Principal> keys = HashBiMap.create(developerKeys); keys.remove(key); return new Cloud(name, createdAt, lastLoginInfo, creator, keys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withInfo(TenantInfo newInfo) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, newInfo, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } @Override public LockedTenant with(LastLoginInfo lastLoginInfo) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, 
invalidateUserSessionsBefore); } public Cloud withSecretStore(TenantSecretStore tenantSecretStore) { ArrayList<TenantSecretStore> secretStores = new ArrayList<>(tenantSecretStores); secretStores.add(tenantSecretStore); return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, secretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withoutSecretStore(TenantSecretStore tenantSecretStore) { ArrayList<TenantSecretStore> secretStores = new ArrayList<>(tenantSecretStores); secretStores.remove(tenantSecretStore); return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, secretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withArchiveAccess(ArchiveAccess archiveAccess) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withInvalidateUserSessionsBefore(Instant invalidateUserSessionsBefore) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, Optional.of(invalidateUserSessionsBefore)); } }
class Cloud extends LockedTenant { private final Optional<Principal> creator; private final BiMap<PublicKey, Principal> developerKeys; private final TenantInfo info; private final List<TenantSecretStore> tenantSecretStores; private final ArchiveAccess archiveAccess; private final Optional<Instant> invalidateUserSessionsBefore; private Cloud(TenantName name, Instant createdAt, LastLoginInfo lastLoginInfo, Optional<Principal> creator, BiMap<PublicKey, Principal> developerKeys, TenantInfo info, List<TenantSecretStore> tenantSecretStores, ArchiveAccess archiveAccess, Optional<Instant> invalidateUserSessionsBefore) { super(name, createdAt, lastLoginInfo); this.developerKeys = ImmutableBiMap.copyOf(developerKeys); this.creator = creator; this.info = info; this.tenantSecretStores = tenantSecretStores; this.archiveAccess = archiveAccess; this.invalidateUserSessionsBefore = invalidateUserSessionsBefore; } private Cloud(CloudTenant tenant) { this(tenant.name(), tenant.createdAt(), tenant.lastLoginInfo(), tenant.creator(), tenant.developerKeys(), tenant.info(), tenant.tenantSecretStores(), tenant.archiveAccess(), tenant.invalidateUserSessionsBefore()); } @Override public CloudTenant get() { return new CloudTenant(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withoutDeveloperKey(PublicKey key) { BiMap<PublicKey, Principal> keys = HashBiMap.create(developerKeys); keys.remove(key); return new Cloud(name, createdAt, lastLoginInfo, creator, keys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withInfo(TenantInfo newInfo) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, newInfo, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } @Override public LockedTenant with(LastLoginInfo lastLoginInfo) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, 
invalidateUserSessionsBefore); } public Cloud withSecretStore(TenantSecretStore tenantSecretStore) { ArrayList<TenantSecretStore> secretStores = new ArrayList<>(tenantSecretStores); secretStores.add(tenantSecretStore); return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, secretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withoutSecretStore(TenantSecretStore tenantSecretStore) { ArrayList<TenantSecretStore> secretStores = new ArrayList<>(tenantSecretStores); secretStores.remove(tenantSecretStore); return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, secretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withArchiveAccess(ArchiveAccess archiveAccess) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, invalidateUserSessionsBefore); } public Cloud withInvalidateUserSessionsBefore(Instant invalidateUserSessionsBefore) { return new Cloud(name, createdAt, lastLoginInfo, creator, developerKeys, info, tenantSecretStores, archiveAccess, Optional.of(invalidateUserSessionsBefore)); } }
Consider adding info logging whenever RPC port changes (if not already present)
private void watchPortChange() { Subscriber subscriber = subscriberFactory.getSubscriber(Collections.singleton(new ConfigKey<>(QrConfig.class, configId)), "portWatcher"); try { while (true) { subscriber.waitNextGeneration(false); if (first(subscriber.config().values()) instanceof QrConfig newConfig) { reconfigure(newConfig.shutdown()); synchronized (this) { if (qrConfig.rpc().port() != newConfig.rpc().port()) { try { reListenRpc(newConfig); } catch (Throwable e) { com.yahoo.protect.Process.logAndDie("Rpc port config has changed from " + qrConfig.rpc().port() + " to " + newConfig.rpc().port() + ", and we were not able to reconfigure so we will just bail out and restart.", e); } } qrConfig = newConfig; } log.fine("Received new QrConfig :" + newConfig); } } } finally { subscriber.close(); } }
reListenRpc(newConfig);
private void watchPortChange() { Subscriber subscriber = subscriberFactory.getSubscriber(Collections.singleton(new ConfigKey<>(QrConfig.class, configId)), "portWatcher"); try { while (true) { subscriber.waitNextGeneration(false); if (first(subscriber.config().values()) instanceof QrConfig newConfig) { reconfigure(newConfig.shutdown()); synchronized (this) { if (qrConfig.rpc().port() != newConfig.rpc().port()) { log.log(Level.INFO, "Rpc port changed from " + qrConfig.rpc().port() + " to " + newConfig.rpc().port()); try { reListenRpc(newConfig); } catch (Throwable e) { com.yahoo.protect.Process.logAndDie("Rpc port config has changed from " + qrConfig.rpc().port() + " to " + newConfig.rpc().port() + ", and we were not able to reconfigure so we will just bail out and restart.", e); } } qrConfig = newConfig; } log.fine("Received new QrConfig :" + newConfig); } } } finally { subscriber.close(); } }
class to be loaded, * which runs the static block. */ @SuppressWarnings("UnusedDeclaration") public static void ensureVespaLoggingInitialized() { }
class to be loaded, * which runs the static block. */ @SuppressWarnings("UnusedDeclaration") public static void ensureVespaLoggingInitialized() { }
Done
private void watchPortChange() { Subscriber subscriber = subscriberFactory.getSubscriber(Collections.singleton(new ConfigKey<>(QrConfig.class, configId)), "portWatcher"); try { while (true) { subscriber.waitNextGeneration(false); if (first(subscriber.config().values()) instanceof QrConfig newConfig) { reconfigure(newConfig.shutdown()); synchronized (this) { if (qrConfig.rpc().port() != newConfig.rpc().port()) { try { reListenRpc(newConfig); } catch (Throwable e) { com.yahoo.protect.Process.logAndDie("Rpc port config has changed from " + qrConfig.rpc().port() + " to " + newConfig.rpc().port() + ", and we were not able to reconfigure so we will just bail out and restart.", e); } } qrConfig = newConfig; } log.fine("Received new QrConfig :" + newConfig); } } } finally { subscriber.close(); } }
reListenRpc(newConfig);
private void watchPortChange() { Subscriber subscriber = subscriberFactory.getSubscriber(Collections.singleton(new ConfigKey<>(QrConfig.class, configId)), "portWatcher"); try { while (true) { subscriber.waitNextGeneration(false); if (first(subscriber.config().values()) instanceof QrConfig newConfig) { reconfigure(newConfig.shutdown()); synchronized (this) { if (qrConfig.rpc().port() != newConfig.rpc().port()) { log.log(Level.INFO, "Rpc port changed from " + qrConfig.rpc().port() + " to " + newConfig.rpc().port()); try { reListenRpc(newConfig); } catch (Throwable e) { com.yahoo.protect.Process.logAndDie("Rpc port config has changed from " + qrConfig.rpc().port() + " to " + newConfig.rpc().port() + ", and we were not able to reconfigure so we will just bail out and restart.", e); } } qrConfig = newConfig; } log.fine("Received new QrConfig :" + newConfig); } } } finally { subscriber.close(); } }
class to be loaded, * which runs the static block. */ @SuppressWarnings("UnusedDeclaration") public static void ensureVespaLoggingInitialized() { }
class to be loaded, * which runs the static block. */ @SuppressWarnings("UnusedDeclaration") public static void ensureVespaLoggingInitialized() { }
... lest apps built against, e.g., major 8, will be eligible for "upgrade" to 7, as long as they have _no_ deployments at all ...
public InstanceList compatibleWithPlatform(Version platform, Function<ApplicationId, VersionCompatibility> compatibility) { return matching(id -> instance(id).productionDeployments().values().stream() .flatMap(deployment -> application(id).revisions().get(deployment.revision()).compileVersion().stream()) .noneMatch(version -> compatibility.apply(id).refuse(platform, version)) && application(id).revisions().production().stream() .anyMatch(revision -> revision.compileVersion() .map(compiled -> compatibility.apply(id).accept(platform, compiled)) .orElse(true))); }
.orElse(true)));
public InstanceList compatibleWithPlatform(Version platform, Function<ApplicationId, VersionCompatibility> compatibility) { return matching(id -> instance(id).productionDeployments().values().stream() .flatMap(deployment -> application(id).revisions().get(deployment.revision()).compileVersion().stream()) .noneMatch(version -> compatibility.apply(id).refuse(platform, version)) && application(id).revisions().production().stream() .anyMatch(revision -> revision.compileVersion() .map(compiled -> compatibility.apply(id).accept(platform, compiled)) .orElse(true))); }
class InstanceList extends AbstractFilteringList<ApplicationId, InstanceList> { private final Map<ApplicationId, DeploymentStatus> instances; private InstanceList(Collection<? extends ApplicationId> items, boolean negate, Map<ApplicationId, DeploymentStatus> instances) { super(items, negate, (i, n) -> new InstanceList(i, n, instances)); this.instances = Map.copyOf(instances); } /** * Returns the subset of instances where all production deployments are compatible with the given version, * and at least one known build is compatible with the given version. * * @param platform the version which applications returned are compatible with */ /** * Returns the subset of instances that aren't pinned to an earlier major version than the given one. * * @param targetMajorVersion the target major version which applications returned allows upgrading to * @param defaultMajorVersion the default major version to assume for applications not specifying one */ public InstanceList allowingMajorVersion(int targetMajorVersion, int defaultMajorVersion) { return matching(id -> targetMajorVersion <= application(id).deploymentSpec().majorVersion() .orElse(application(id).majorVersion() .orElse(defaultMajorVersion))); } /** Returns the subset of instances that are allowed to upgrade to the given version at the given time */ public InstanceList canUpgradeAt(Version version, Instant instant) { return matching(id -> instances.get(id).instanceSteps().get(id.instance()) .readyAt(Change.of(version)) .map(readyAt -> ! 
readyAt.isAfter(instant)).orElse(false)); } /** Returns the subset of instances which have at least one production deployment */ public InstanceList withProductionDeployment() { return matching(id -> instance(id).productionDeployments().size() > 0); } /** Returns the subset of instances which contain declared jobs */ public InstanceList withDeclaredJobs() { return matching(id -> instances.get(id).application().revisions().last().isPresent() && instances.get(id).jobSteps().values().stream() .anyMatch(job -> job.isDeclared() && job.job().get().application().equals(id))); } /** Returns the subset of instances which have at least one deployment on a lower version than the given one, or which have no production deployments */ public InstanceList onLowerVersionThan(Version version) { return matching(id -> instance(id).productionDeployments().isEmpty() || instance(id).productionDeployments().values().stream() .anyMatch(deployment -> deployment.version().isBefore(version))); } /** Returns the subset of instances that has completed deployment of given change */ public InstanceList hasCompleted(Change change) { return matching(id -> instances.get(id).hasCompleted(id.instance(), change)); } /** Returns the subset of instances which are currently deploying a change */ public InstanceList deploying() { return matching(id -> instance(id).change().hasTargets()); } /** Returns the subset of instances which are currently deploying a new revision */ public InstanceList changingRevision() { return matching(id -> instance(id).change().revision().isPresent()); } /** Returns the subset of instances which currently have failing jobs on the given version */ public InstanceList failingOn(Version version) { return matching(id -> ! instances.get(id).instanceJobs().get(id).failingHard() .lastCompleted().on(version).isEmpty()); } /** Returns the subset of instances which are not pinned to a certain Vespa version. */ public InstanceList unpinned() { return matching(id -> ! 
instance(id).change().isPinned()); } /** Returns the subset of instances which are currently failing a job. */ public InstanceList failing() { return matching(id -> ! instances.get(id).instanceJobs().get(id).failingHard().isEmpty()); } /** Returns the subset of instances which are currently failing an upgrade. */ public InstanceList failingUpgrade() { return matching(id -> ! instances.get(id).instanceJobs().get(id).failingHard().not().failingApplicationChange().isEmpty()); } /** Returns the subset of instances which are upgrading (to any version), not considering block windows. */ public InstanceList upgrading() { return matching(id -> instance(id).change().platform().isPresent()); } /** Returns the subset of instances which are currently upgrading to the given version */ public InstanceList upgradingTo(Version version) { return upgradingTo(List.of(version)); } /** Returns the subset of instances which are currently upgrading to the given version */ public InstanceList upgradingTo(Collection<Version> versions) { return matching(id -> versions.stream().anyMatch(version -> instance(id).change().platform().equals(Optional.of(version)))); } public InstanceList with(DeploymentSpec.UpgradePolicy policy) { return matching(id -> application(id).deploymentSpec().requireInstance(id.instance()).upgradePolicy() == policy); } /** Returns the subset of instances which started failing on the given version */ public InstanceList startedFailingOn(Version version) { return matching(id -> ! instances.get(id).instanceJobs().get(id).firstFailing().on(version).isEmpty()); } /** Returns this list sorted by increasing oldest production deployment version. Applications without any deployments are ordered first. 
*/ public InstanceList byIncreasingDeployedVersion() { return sortedBy(comparing(id -> instance(id).productionDeployments().values().stream() .map(Deployment::version) .min(naturalOrder()) .orElse(Version.emptyVersion))); } private Application application(ApplicationId id) { return instances.get(id).application(); } private Instance instance(ApplicationId id) { return application(id).require(id.instance()); } public static InstanceList from(DeploymentStatusList statuses) { Map<ApplicationId, DeploymentStatus> instances = new HashMap<>(); for (DeploymentStatus status : statuses.asList()) for (InstanceName instance : status.application().deploymentSpec().instanceNames()) instances.put(status.application().id().instance(instance), status); return new InstanceList(instances.keySet(), false, instances); } }
class InstanceList extends AbstractFilteringList<ApplicationId, InstanceList> { private final Map<ApplicationId, DeploymentStatus> instances; private InstanceList(Collection<? extends ApplicationId> items, boolean negate, Map<ApplicationId, DeploymentStatus> instances) { super(items, negate, (i, n) -> new InstanceList(i, n, instances)); this.instances = Map.copyOf(instances); } /** * Returns the subset of instances where all production deployments are compatible with the given version, * and at least one known build is compatible with the given version. * * @param platform the version which applications returned are compatible with */ /** * Returns the subset of instances that aren't pinned to an earlier major version than the given one. * * @param targetMajorVersion the target major version which applications returned allows upgrading to * @param defaultMajorVersion the default major version to assume for applications not specifying one */ public InstanceList allowingMajorVersion(int targetMajorVersion, int defaultMajorVersion) { return matching(id -> targetMajorVersion <= application(id).deploymentSpec().majorVersion() .orElse(application(id).majorVersion() .orElse(defaultMajorVersion))); } /** Returns the subset of instances that are allowed to upgrade to the given version at the given time */ public InstanceList canUpgradeAt(Version version, Instant instant) { return matching(id -> instances.get(id).instanceSteps().get(id.instance()) .readyAt(Change.of(version)) .map(readyAt -> ! 
readyAt.isAfter(instant)).orElse(false)); } /** Returns the subset of instances which have at least one production deployment */ public InstanceList withProductionDeployment() { return matching(id -> instance(id).productionDeployments().size() > 0); } /** Returns the subset of instances which contain declared jobs */ public InstanceList withDeclaredJobs() { return matching(id -> instances.get(id).application().revisions().last().isPresent() && instances.get(id).jobSteps().values().stream() .anyMatch(job -> job.isDeclared() && job.job().get().application().equals(id))); } /** Returns the subset of instances which have at least one deployment on a lower version than the given one, or which have no production deployments */ public InstanceList onLowerVersionThan(Version version) { return matching(id -> instance(id).productionDeployments().isEmpty() || instance(id).productionDeployments().values().stream() .anyMatch(deployment -> deployment.version().isBefore(version))); } /** Returns the subset of instances that has completed deployment of given change */ public InstanceList hasCompleted(Change change) { return matching(id -> instances.get(id).hasCompleted(id.instance(), change)); } /** Returns the subset of instances which are currently deploying a change */ public InstanceList deploying() { return matching(id -> instance(id).change().hasTargets()); } /** Returns the subset of instances which are currently deploying a new revision */ public InstanceList changingRevision() { return matching(id -> instance(id).change().revision().isPresent()); } /** Returns the subset of instances which currently have failing jobs on the given version */ public InstanceList failingOn(Version version) { return matching(id -> ! instances.get(id).instanceJobs().get(id).failingHard() .lastCompleted().on(version).isEmpty()); } /** Returns the subset of instances which are not pinned to a certain Vespa version. */ public InstanceList unpinned() { return matching(id -> ! 
instance(id).change().isPinned()); } /** Returns the subset of instances which are currently failing a job. */ public InstanceList failing() { return matching(id -> ! instances.get(id).instanceJobs().get(id).failingHard().isEmpty()); } /** Returns the subset of instances which are currently failing an upgrade. */ public InstanceList failingUpgrade() { return matching(id -> ! instances.get(id).instanceJobs().get(id).failingHard().not().failingApplicationChange().isEmpty()); } /** Returns the subset of instances which are upgrading (to any version), not considering block windows. */ public InstanceList upgrading() { return matching(id -> instance(id).change().platform().isPresent()); } /** Returns the subset of instances which are currently upgrading to the given version */ public InstanceList upgradingTo(Version version) { return upgradingTo(List.of(version)); } /** Returns the subset of instances which are currently upgrading to the given version */ public InstanceList upgradingTo(Collection<Version> versions) { return matching(id -> versions.stream().anyMatch(version -> instance(id).change().platform().equals(Optional.of(version)))); } public InstanceList with(DeploymentSpec.UpgradePolicy policy) { return matching(id -> application(id).deploymentSpec().requireInstance(id.instance()).upgradePolicy() == policy); } /** Returns the subset of instances which started failing on the given version */ public InstanceList startedFailingOn(Version version) { return matching(id -> ! instances.get(id).instanceJobs().get(id).firstFailing().on(version).isEmpty()); } /** Returns this list sorted by increasing oldest production deployment version. Applications without any deployments are ordered first. 
*/ public InstanceList byIncreasingDeployedVersion() { return sortedBy(comparing(id -> instance(id).productionDeployments().values().stream() .map(Deployment::version) .min(naturalOrder()) .orElse(Version.emptyVersion))); } private Application application(ApplicationId id) { return instances.get(id).application(); } private Instance instance(ApplicationId id) { return application(id).require(id.instance()); } public static InstanceList from(DeploymentStatusList statuses) { Map<ApplicationId, DeploymentStatus> instances = new HashMap<>(); for (DeploymentStatus status : statuses.asList()) for (InstanceName instance : status.application().deploymentSpec().instanceNames()) instances.put(status.application().id().instance(instance), status); return new InstanceList(instances.keySet(), false, instances); } }
```suggestion return Duration.ofSeconds((long)(decayer.averageSearchTime())); ```
Duration averageSearchTime() { return Duration.ofNanos((long)(decayer.averageSearchTime()*1_000_000_000)); }
return Duration.ofNanos((long)(decayer.averageSearchTime()*1_000_000_000));
Duration averageSearchTime() { return fromDouble(averageSearchTime);}
class NoDecay implements Decayer { public void decay(RequestDuration duration) {} public double averageSearchTime() { return MIN_QUERY_TIME; } }
class DecayByRequests implements GroupStatus.Decayer { private long queries; private double averageSearchTime; DecayByRequests() { this(0, INITIAL_QUERY_TIME); } DecayByRequests(long initialQueries, Duration initialSearchTime) { queries = initialQueries; averageSearchTime = toDouble(initialSearchTime); } public void decay(RequestDuration duration) { double searchTime = Math.max(toDouble(duration.duration()), MIN_QUERY_TIME); double decayRate = Math.min(queries + MIN_LATENCY_DECAY_RATE, DEFAULT_LATENCY_DECAY_RATE); queries++; averageSearchTime = (searchTime + (decayRate - 1) * averageSearchTime) / decayRate; } public double averageCost() { return averageSearchTime; } }
```suggestion return duration.toSeconds(); ```
private static double toDouble(Duration duration) { return duration.toNanos()/1_000_000_000.0; }
return duration.toNanos()/1_000_000_000.0;
private static double toDouble(Duration duration) { return duration.toNanos()/1_000_000_000.0; }
class AdaptiveScheduler implements GroupScheduler { enum Type {TIME, REQUESTS} private final Random random; private final List<GroupStatus> scoreboard; static class DecayByRequests implements GroupStatus.Decayer { private long queries; private double averageSearchTime; DecayByRequests() { this(0, INITIAL_QUERY_TIME); } DecayByRequests(long initialQueries, Duration initialSearchTime) { queries = initialQueries; averageSearchTime = toDouble(initialSearchTime); } public void decay(RequestDuration duration) { double searchTime = Math.max(toDouble(duration.duration()), MIN_QUERY_TIME); double decayRate = Math.min(queries + MIN_LATENCY_DECAY_RATE, DEFAULT_LATENCY_DECAY_RATE); queries++; averageSearchTime = (searchTime + (decayRate - 1) * averageSearchTime) / decayRate; } public double averageSearchTime() { return averageSearchTime; } } static class DecayByTime implements GroupStatus.Decayer { private double averageSearchTime; private RequestDuration prev; DecayByTime() { this(INITIAL_QUERY_TIME, RequestDuration.of(Duration.ZERO)); } DecayByTime(Duration initialSearchTime, RequestDuration start) { averageSearchTime = toDouble(initialSearchTime); prev = start; } public void decay(RequestDuration duration) { double searchTime = Math.max(duration.duration().toMillis()/1000.0, MIN_QUERY_TIME); double decayRate = LATENCY_DECAY_TIME; double sampleWeight = Math.min(decayRate/2, toDouble(duration.timeSince(prev))); averageSearchTime = (sampleWeight*searchTime + (decayRate - sampleWeight) * averageSearchTime) / decayRate; prev = duration; } public double averageSearchTime() { return averageSearchTime; } } public AdaptiveScheduler(Type type, Random random, List<GroupStatus> scoreboard) { this.random = random; this.scoreboard = scoreboard; for (GroupStatus gs : scoreboard) { gs.setDecayer(type == Type.REQUESTS ? 
new DecayByRequests() : new DecayByTime()); } } private Optional<GroupStatus> selectGroup(double needle, boolean requireCoverage, Set<Integer> rejected) { double sum = 0; int n = 0; for (GroupStatus gs : scoreboard) { if (rejected == null || !rejected.contains(gs.group.id())) { if (!requireCoverage || gs.group.hasSufficientCoverage()) { sum += gs.averageSearchTimeInverse(); n++; } } } if (n == 0) { return Optional.empty(); } double accum = 0; for (GroupStatus gs : scoreboard) { if (rejected == null || !rejected.contains(gs.group.id())) { if (!requireCoverage || gs.group.hasSufficientCoverage()) { accum += gs.averageSearchTimeInverse(); if (needle < accum / sum) { return Optional.of(gs); } } } } return Optional.empty(); } @Override public Optional<GroupStatus> takeNextGroup(Set<Integer> rejectedGroups) { double needle = random.nextDouble(); Optional<GroupStatus> gs = selectGroup(needle, true, rejectedGroups); if (gs.isPresent()) return gs; return selectGroup(needle, false, rejectedGroups); } }
class AdaptiveScheduler implements GroupScheduler { enum Type {TIME, REQUESTS} private final Random random; private final List<GroupStatus> scoreboard; private static Duration fromDouble(double seconds) { return Duration.ofNanos((long)(seconds*1_000_000_000));} static class DecayByRequests implements GroupStatus.Decayer { private long queries; private double averageSearchTime; DecayByRequests() { this(0, INITIAL_QUERY_TIME); } DecayByRequests(long initialQueries, Duration initialSearchTime) { queries = initialQueries; averageSearchTime = toDouble(initialSearchTime); } public void decay(RequestDuration duration) { double searchTime = Math.max(toDouble(duration.duration()), MIN_QUERY_TIME); double decayRate = Math.min(queries + MIN_LATENCY_DECAY_RATE, DEFAULT_LATENCY_DECAY_RATE); queries++; averageSearchTime = (searchTime + (decayRate - 1) * averageSearchTime) / decayRate; } public double averageCost() { return averageSearchTime; } Duration averageSearchTime() { return fromDouble(averageSearchTime);} } static class DecayByTime implements GroupStatus.Decayer { private double averageSearchTime; private RequestDuration prev; DecayByTime() { this(INITIAL_QUERY_TIME, RequestDuration.of(Duration.ZERO)); } DecayByTime(Duration initialSearchTime, RequestDuration start) { averageSearchTime = toDouble(initialSearchTime); prev = start; } public void decay(RequestDuration duration) { double searchTime = Math.max(toDouble(duration.duration()), MIN_QUERY_TIME); double decayRate = LATENCY_DECAY_TIME; double sampleWeight = Math.min(decayRate/2, toDouble(duration.difference(prev))); averageSearchTime = (sampleWeight*searchTime + (decayRate - sampleWeight) * averageSearchTime) / decayRate; prev = duration; } public double averageCost() { return averageSearchTime; } Duration averageSearchTime() { return fromDouble(averageSearchTime);} } public AdaptiveScheduler(Type type, Random random, List<GroupStatus> scoreboard) { this.random = random; this.scoreboard = scoreboard; for (GroupStatus 
gs : scoreboard) { gs.setDecayer(type == Type.REQUESTS ? new DecayByRequests() : new DecayByTime()); } } private Optional<GroupStatus> selectGroup(double needle, boolean requireCoverage, Set<Integer> rejected) { double sum = 0; int n = 0; for (GroupStatus gs : scoreboard) { if (rejected == null || !rejected.contains(gs.group.id())) { if (!requireCoverage || gs.group.hasSufficientCoverage()) { sum += gs.weight(); n++; } } } if (n == 0) { return Optional.empty(); } double accum = 0; for (GroupStatus gs : scoreboard) { if (rejected == null || !rejected.contains(gs.group.id())) { if (!requireCoverage || gs.group.hasSufficientCoverage()) { accum += gs.weight(); if (needle < accum / sum) { return Optional.of(gs); } } } } return Optional.empty(); } @Override public Optional<GroupStatus> takeNextGroup(Set<Integer> rejectedGroups) { double needle = random.nextDouble(); Optional<GroupStatus> gs = selectGroup(needle, true, rejectedGroups); if (gs.isPresent()) return gs; return selectGroup(needle, false, rejectedGroups); } }
No can do, This only gives number of whole seconds. Needs exact time as a floating point number in seconds.
private static double toDouble(Duration duration) { return duration.toNanos()/1_000_000_000.0; }
return duration.toNanos()/1_000_000_000.0;
private static double toDouble(Duration duration) { return duration.toNanos()/1_000_000_000.0; }
class AdaptiveScheduler implements GroupScheduler { enum Type {TIME, REQUESTS} private final Random random; private final List<GroupStatus> scoreboard; static class DecayByRequests implements GroupStatus.Decayer { private long queries; private double averageSearchTime; DecayByRequests() { this(0, INITIAL_QUERY_TIME); } DecayByRequests(long initialQueries, Duration initialSearchTime) { queries = initialQueries; averageSearchTime = toDouble(initialSearchTime); } public void decay(RequestDuration duration) { double searchTime = Math.max(toDouble(duration.duration()), MIN_QUERY_TIME); double decayRate = Math.min(queries + MIN_LATENCY_DECAY_RATE, DEFAULT_LATENCY_DECAY_RATE); queries++; averageSearchTime = (searchTime + (decayRate - 1) * averageSearchTime) / decayRate; } public double averageSearchTime() { return averageSearchTime; } } static class DecayByTime implements GroupStatus.Decayer { private double averageSearchTime; private RequestDuration prev; DecayByTime() { this(INITIAL_QUERY_TIME, RequestDuration.of(Duration.ZERO)); } DecayByTime(Duration initialSearchTime, RequestDuration start) { averageSearchTime = toDouble(initialSearchTime); prev = start; } public void decay(RequestDuration duration) { double searchTime = Math.max(duration.duration().toMillis()/1000.0, MIN_QUERY_TIME); double decayRate = LATENCY_DECAY_TIME; double sampleWeight = Math.min(decayRate/2, toDouble(duration.timeSince(prev))); averageSearchTime = (sampleWeight*searchTime + (decayRate - sampleWeight) * averageSearchTime) / decayRate; prev = duration; } public double averageSearchTime() { return averageSearchTime; } } public AdaptiveScheduler(Type type, Random random, List<GroupStatus> scoreboard) { this.random = random; this.scoreboard = scoreboard; for (GroupStatus gs : scoreboard) { gs.setDecayer(type == Type.REQUESTS ? 
new DecayByRequests() : new DecayByTime()); } } private Optional<GroupStatus> selectGroup(double needle, boolean requireCoverage, Set<Integer> rejected) { double sum = 0; int n = 0; for (GroupStatus gs : scoreboard) { if (rejected == null || !rejected.contains(gs.group.id())) { if (!requireCoverage || gs.group.hasSufficientCoverage()) { sum += gs.averageSearchTimeInverse(); n++; } } } if (n == 0) { return Optional.empty(); } double accum = 0; for (GroupStatus gs : scoreboard) { if (rejected == null || !rejected.contains(gs.group.id())) { if (!requireCoverage || gs.group.hasSufficientCoverage()) { accum += gs.averageSearchTimeInverse(); if (needle < accum / sum) { return Optional.of(gs); } } } } return Optional.empty(); } @Override public Optional<GroupStatus> takeNextGroup(Set<Integer> rejectedGroups) { double needle = random.nextDouble(); Optional<GroupStatus> gs = selectGroup(needle, true, rejectedGroups); if (gs.isPresent()) return gs; return selectGroup(needle, false, rejectedGroups); } }
class AdaptiveScheduler implements GroupScheduler { enum Type {TIME, REQUESTS} private final Random random; private final List<GroupStatus> scoreboard; private static Duration fromDouble(double seconds) { return Duration.ofNanos((long)(seconds*1_000_000_000));} static class DecayByRequests implements GroupStatus.Decayer { private long queries; private double averageSearchTime; DecayByRequests() { this(0, INITIAL_QUERY_TIME); } DecayByRequests(long initialQueries, Duration initialSearchTime) { queries = initialQueries; averageSearchTime = toDouble(initialSearchTime); } public void decay(RequestDuration duration) { double searchTime = Math.max(toDouble(duration.duration()), MIN_QUERY_TIME); double decayRate = Math.min(queries + MIN_LATENCY_DECAY_RATE, DEFAULT_LATENCY_DECAY_RATE); queries++; averageSearchTime = (searchTime + (decayRate - 1) * averageSearchTime) / decayRate; } public double averageCost() { return averageSearchTime; } Duration averageSearchTime() { return fromDouble(averageSearchTime);} } static class DecayByTime implements GroupStatus.Decayer { private double averageSearchTime; private RequestDuration prev; DecayByTime() { this(INITIAL_QUERY_TIME, RequestDuration.of(Duration.ZERO)); } DecayByTime(Duration initialSearchTime, RequestDuration start) { averageSearchTime = toDouble(initialSearchTime); prev = start; } public void decay(RequestDuration duration) { double searchTime = Math.max(toDouble(duration.duration()), MIN_QUERY_TIME); double decayRate = LATENCY_DECAY_TIME; double sampleWeight = Math.min(decayRate/2, toDouble(duration.difference(prev))); averageSearchTime = (sampleWeight*searchTime + (decayRate - sampleWeight) * averageSearchTime) / decayRate; prev = duration; } public double averageCost() { return averageSearchTime; } Duration averageSearchTime() { return fromDouble(averageSearchTime);} } public AdaptiveScheduler(Type type, Random random, List<GroupStatus> scoreboard) { this.random = random; this.scoreboard = scoreboard; for (GroupStatus 
gs : scoreboard) { gs.setDecayer(type == Type.REQUESTS ? new DecayByRequests() : new DecayByTime()); } } private Optional<GroupStatus> selectGroup(double needle, boolean requireCoverage, Set<Integer> rejected) { double sum = 0; int n = 0; for (GroupStatus gs : scoreboard) { if (rejected == null || !rejected.contains(gs.group.id())) { if (!requireCoverage || gs.group.hasSufficientCoverage()) { sum += gs.weight(); n++; } } } if (n == 0) { return Optional.empty(); } double accum = 0; for (GroupStatus gs : scoreboard) { if (rejected == null || !rejected.contains(gs.group.id())) { if (!requireCoverage || gs.group.hasSufficientCoverage()) { accum += gs.weight(); if (needle < accum / sum) { return Optional.of(gs); } } } } return Optional.empty(); } @Override public Optional<GroupStatus> takeNextGroup(Set<Integer> rejectedGroups) { double needle = random.nextDouble(); Optional<GroupStatus> gs = selectGroup(needle, true, rejectedGroups); if (gs.isPresent()) return gs; return selectGroup(needle, false, rejectedGroups); } }
Needs the full resolution.
Duration averageSearchTime() { return Duration.ofNanos((long)(decayer.averageSearchTime()*1_000_000_000)); }
return Duration.ofNanos((long)(decayer.averageSearchTime()*1_000_000_000));
Duration averageSearchTime() { return fromDouble(averageSearchTime);}
class NoDecay implements Decayer { public void decay(RequestDuration duration) {} public double averageSearchTime() { return MIN_QUERY_TIME; } }
class DecayByRequests implements GroupStatus.Decayer { private long queries; private double averageSearchTime; DecayByRequests() { this(0, INITIAL_QUERY_TIME); } DecayByRequests(long initialQueries, Duration initialSearchTime) { queries = initialQueries; averageSearchTime = toDouble(initialSearchTime); } public void decay(RequestDuration duration) { double searchTime = Math.max(toDouble(duration.duration()), MIN_QUERY_TIME); double decayRate = Math.min(queries + MIN_LATENCY_DECAY_RATE, DEFAULT_LATENCY_DECAY_RATE); queries++; averageSearchTime = (searchTime + (decayRate - 1) * averageSearchTime) / decayRate; } public double averageCost() { return averageSearchTime; } }
```suggestion if (!dimensions.equals(previousMemoryOverheadDimensions)) ```
private void updateMemoryOverheadMetric(int numContainers, double memoryOverhead) { final String name = "mem.system.overhead"; Dimensions dimensions = new Dimensions(Map.of("containers", Integer.toString(numContainers))); metrics.declareGauge(Metrics.APPLICATION_HOST, name, dimensions, Metrics.DimensionType.DEFAULT) .sample(memoryOverhead); if (previousMemoryOverheadDimensions != null && !previousMemoryOverheadDimensions.equals(dimensions)) metrics.deleteMetricByDimension(name, previousMemoryOverheadDimensions, Metrics.DimensionType.DEFAULT); previousMemoryOverheadDimensions = dimensions; }
if (previousMemoryOverheadDimensions != null && !previousMemoryOverheadDimensions.equals(dimensions))
private void updateMemoryOverheadMetric(int numContainers, double memoryOverhead) { final String name = "mem.system.overhead"; Dimensions dimensions = new Dimensions(Map.of("containers", Integer.toString(numContainers))); metrics.declareGauge(Metrics.APPLICATION_HOST, name, dimensions, Metrics.DimensionType.DEFAULT) .sample(memoryOverhead); if (previousMemoryOverheadDimensions != null && !previousMemoryOverheadDimensions.equals(dimensions)) metrics.deleteMetricByDimension(name, previousMemoryOverheadDimensions, Metrics.DimensionType.DEFAULT); previousMemoryOverheadDimensions = dimensions; }
class NodeAdminImpl implements NodeAdmin { private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5); private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3); private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory; private final Clock clock; private final Duration freezeTimeout; private final Duration spread; private boolean previousWantFrozen; private boolean isFrozen; private Instant startOfFreezeConvergence; private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>(); private final ProcMeminfoReader procMeminfoReader; private final Gauge jvmHeapUsed; private final Gauge jvmHeapFree; private final Gauge jvmHeapTotal; private final Gauge containerCount; private final Counter numberOfUnhandledExceptions; private final Metrics metrics; private Dimensions previousMemoryOverheadDimensions = null; public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock, FileSystem fileSystem) { this(nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), metrics, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD, new ProcMeminfoReader(fileSystem)); } public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock, Duration freezeTimeout, Duration spread, ProcMeminfoReader procMeminfoReader) { this(nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), metrics, clock, freezeTimeout, spread, procMeminfoReader); } NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory, Metrics metrics, Clock clock, Duration freezeTimeout, Duration spread, ProcMeminfoReader procMeminfoReader) { this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory; this.clock = clock; this.freezeTimeout = freezeTimeout; this.spread = spread; this.previousWantFrozen = true; this.isFrozen = true; this.startOfFreezeConvergence = clock.instant(); this.numberOfUnhandledExceptions = 
metrics.declareCounter("unhandled_exceptions", new Dimensions(Map.of("src", "node-agents"))); this.procMeminfoReader = procMeminfoReader; this.jvmHeapUsed = metrics.declareGauge("mem.heap.used"); this.jvmHeapFree = metrics.declareGauge("mem.heap.free"); this.jvmHeapTotal = metrics.declareGauge("mem.heap.total"); this.containerCount = metrics.declareGauge("container.count"); this.metrics = metrics; } @Override public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) { Map<String, NodeAgentContext> nodeAgentContextsByHostname = nodeAgentContexts.stream() .collect(Collectors.toMap(ctx -> ctx.node().id(), Function.identity())); diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet()) .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stopForRemoval()); diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> { NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname)); naws.start(); nodeAgentWithSchedulerByHostname.put(hostname, naws); }); Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1)); Instant nextAgentStart = clock.instant(); for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) { nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart); nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents); } } @Override public void updateMetrics(boolean isSuspended) { int numContainers = 0; long totalContainerMemoryBytes = 0; for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) { int count = nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions(); if (!isSuspended) numberOfUnhandledExceptions.add(count); Optional<ContainerStats> containerStats = nodeAgentWithScheduler.updateContainerNodeMetrics(isSuspended); if 
(containerStats.isPresent()) { ++numContainers; totalContainerMemoryBytes += containerStats.get().getMemoryStats().getUsage(); } } Runtime runtime = Runtime.getRuntime(); runtime.gc(); long freeMemory = runtime.freeMemory(); long totalMemory = runtime.totalMemory(); long usedMemory = totalMemory - freeMemory; jvmHeapFree.sample(freeMemory); jvmHeapUsed.sample(usedMemory); jvmHeapTotal.sample(totalMemory); if (!isSuspended) { containerCount.sample(numContainers); ProcMeminfo meminfo = procMeminfoReader.read(); updateMemoryOverheadMetric(numContainers, meminfo.memTotalBytes() - meminfo.memAvailableBytes() - totalContainerMemoryBytes); } } @Override public boolean setFrozen(boolean wantFrozen) { if (wantFrozen != previousWantFrozen) { if (wantFrozen) { this.startOfFreezeConvergence = clock.instant(); } else { this.startOfFreezeConvergence = null; } previousWantFrozen = wantFrozen; } boolean allNodeAgentsConverged = parallelStreamOfNodeAgentWithScheduler() .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout)) .count() == 0; if (wantFrozen) { if (allNodeAgentsConverged) isFrozen = true; } else isFrozen = false; return allNodeAgentsConverged; } @Override public boolean isFrozen() { return isFrozen; } @Override public Duration subsystemFreezeDuration() { if (startOfFreezeConvergence == null) { return Duration.ZERO; } else { return Duration.between(startOfFreezeConvergence, clock.instant()); } } @Override public void stopNodeAgentServices() { parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForHostSuspension); } @Override public void start() { } @Override public void stop() { parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForRemoval); } /** * Returns a parallel stream of NodeAgentWithScheduler. * * <p>Why not just call nodeAgentWithSchedulerByHostname.values().parallelStream()? 
Experiments * with Java 11 have shown that with 10 nodes and forEach(), there are a maximum of 3 concurrent * threads. With HashMap it produces 5. With List it produces 10 concurrent threads.</p> */ private Stream<NodeAgentWithScheduler> parallelStreamOfNodeAgentWithScheduler() { return List.copyOf(nodeAgentWithSchedulerByHostname.values()).parallelStream(); } private static <T> Set<T> diff(Set<T> minuend, Set<T> subtrahend) { var result = new HashSet<>(minuend); result.removeAll(subtrahend); return result; } static class NodeAgentWithScheduler implements NodeAgentScheduler { private final NodeAgent nodeAgent; private final NodeAgentScheduler nodeAgentScheduler; private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) { this.nodeAgent = nodeAgent; this.nodeAgentScheduler = nodeAgentScheduler; } void start() { nodeAgent.start(currentContext()); } void stopForHostSuspension() { nodeAgent.stopForHostSuspension(currentContext()); } void stopForRemoval() { nodeAgent.stopForRemoval(currentContext()); } Optional<ContainerStats> updateContainerNodeMetrics(boolean isSuspended) { return nodeAgent.updateContainerNodeMetrics(currentContext(), isSuspended); } int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); } @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); } @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); } @Override public NodeAgentContext currentContext() { return nodeAgentScheduler.currentContext(); } } @FunctionalInterface interface NodeAgentWithSchedulerFactory { NodeAgentWithScheduler create(NodeAgentContext context); } private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) { NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context); 
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager, context); return new NodeAgentWithScheduler(nodeAgent, contextManager); } }
class NodeAdminImpl implements NodeAdmin { private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5); private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3); private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory; private final Clock clock; private final Duration freezeTimeout; private final Duration spread; private boolean previousWantFrozen; private boolean isFrozen; private Instant startOfFreezeConvergence; private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>(); private final ProcMeminfoReader procMeminfoReader; private final Gauge jvmHeapUsed; private final Gauge jvmHeapFree; private final Gauge jvmHeapTotal; private final Gauge containerCount; private final Counter numberOfUnhandledExceptions; private final Metrics metrics; private Dimensions previousMemoryOverheadDimensions = null; public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock, FileSystem fileSystem) { this(nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), metrics, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD, new ProcMeminfoReader(fileSystem)); } public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock, Duration freezeTimeout, Duration spread, ProcMeminfoReader procMeminfoReader) { this(nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), metrics, clock, freezeTimeout, spread, procMeminfoReader); } NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory, Metrics metrics, Clock clock, Duration freezeTimeout, Duration spread, ProcMeminfoReader procMeminfoReader) { this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory; this.clock = clock; this.freezeTimeout = freezeTimeout; this.spread = spread; this.previousWantFrozen = true; this.isFrozen = true; this.startOfFreezeConvergence = clock.instant(); this.numberOfUnhandledExceptions = 
metrics.declareCounter("unhandled_exceptions", new Dimensions(Map.of("src", "node-agents"))); this.procMeminfoReader = procMeminfoReader; this.jvmHeapUsed = metrics.declareGauge("mem.heap.used"); this.jvmHeapFree = metrics.declareGauge("mem.heap.free"); this.jvmHeapTotal = metrics.declareGauge("mem.heap.total"); this.containerCount = metrics.declareGauge("container.count"); this.metrics = metrics; } @Override public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) { Map<String, NodeAgentContext> nodeAgentContextsByHostname = nodeAgentContexts.stream() .collect(Collectors.toMap(ctx -> ctx.node().id(), Function.identity())); diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet()) .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stopForRemoval()); diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> { NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname)); naws.start(); nodeAgentWithSchedulerByHostname.put(hostname, naws); }); Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1)); Instant nextAgentStart = clock.instant(); for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) { nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart); nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents); } } @Override public void updateMetrics(boolean isSuspended) { int numContainers = 0; long totalContainerMemoryBytes = 0; for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) { int count = nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions(); if (!isSuspended) numberOfUnhandledExceptions.add(count); Optional<ContainerStats> containerStats = nodeAgentWithScheduler.updateContainerNodeMetrics(isSuspended); if 
(containerStats.isPresent()) { ++numContainers; totalContainerMemoryBytes += containerStats.get().getMemoryStats().getUsage(); } } Runtime runtime = Runtime.getRuntime(); runtime.gc(); long freeMemory = runtime.freeMemory(); long totalMemory = runtime.totalMemory(); long usedMemory = totalMemory - freeMemory; jvmHeapFree.sample(freeMemory); jvmHeapUsed.sample(usedMemory); jvmHeapTotal.sample(totalMemory); if (!isSuspended) { containerCount.sample(numContainers); ProcMeminfo meminfo = procMeminfoReader.read(); updateMemoryOverheadMetric(numContainers, meminfo.memTotalBytes() - meminfo.memAvailableBytes() - totalContainerMemoryBytes); } } @Override public boolean setFrozen(boolean wantFrozen) { if (wantFrozen != previousWantFrozen) { if (wantFrozen) { this.startOfFreezeConvergence = clock.instant(); } else { this.startOfFreezeConvergence = null; } previousWantFrozen = wantFrozen; } boolean allNodeAgentsConverged = parallelStreamOfNodeAgentWithScheduler() .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout)) .count() == 0; if (wantFrozen) { if (allNodeAgentsConverged) isFrozen = true; } else isFrozen = false; return allNodeAgentsConverged; } @Override public boolean isFrozen() { return isFrozen; } @Override public Duration subsystemFreezeDuration() { if (startOfFreezeConvergence == null) { return Duration.ZERO; } else { return Duration.between(startOfFreezeConvergence, clock.instant()); } } @Override public void stopNodeAgentServices() { parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForHostSuspension); } @Override public void start() { } @Override public void stop() { parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForRemoval); } /** * Returns a parallel stream of NodeAgentWithScheduler. * * <p>Why not just call nodeAgentWithSchedulerByHostname.values().parallelStream()? 
Experiments * with Java 11 have shown that with 10 nodes and forEach(), there are a maximum of 3 concurrent * threads. With HashMap it produces 5. With List it produces 10 concurrent threads.</p> */ private Stream<NodeAgentWithScheduler> parallelStreamOfNodeAgentWithScheduler() { return List.copyOf(nodeAgentWithSchedulerByHostname.values()).parallelStream(); } private static <T> Set<T> diff(Set<T> minuend, Set<T> subtrahend) { var result = new HashSet<>(minuend); result.removeAll(subtrahend); return result; } static class NodeAgentWithScheduler implements NodeAgentScheduler { private final NodeAgent nodeAgent; private final NodeAgentScheduler nodeAgentScheduler; private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) { this.nodeAgent = nodeAgent; this.nodeAgentScheduler = nodeAgentScheduler; } void start() { nodeAgent.start(currentContext()); } void stopForHostSuspension() { nodeAgent.stopForHostSuspension(currentContext()); } void stopForRemoval() { nodeAgent.stopForRemoval(currentContext()); } Optional<ContainerStats> updateContainerNodeMetrics(boolean isSuspended) { return nodeAgent.updateContainerNodeMetrics(currentContext(), isSuspended); } int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); } @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); } @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); } @Override public NodeAgentContext currentContext() { return nodeAgentScheduler.currentContext(); } } @FunctionalInterface interface NodeAgentWithSchedulerFactory { NodeAgentWithScheduler create(NodeAgentContext context); } private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) { NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context); 
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager, context); return new NodeAgentWithScheduler(nodeAgent, contextManager); } }
This would be wrong if previousMemoryOverheadDimensions is null.
private void updateMemoryOverheadMetric(int numContainers, double memoryOverhead) { final String name = "mem.system.overhead"; Dimensions dimensions = new Dimensions(Map.of("containers", Integer.toString(numContainers))); metrics.declareGauge(Metrics.APPLICATION_HOST, name, dimensions, Metrics.DimensionType.DEFAULT) .sample(memoryOverhead); if (previousMemoryOverheadDimensions != null && !previousMemoryOverheadDimensions.equals(dimensions)) metrics.deleteMetricByDimension(name, previousMemoryOverheadDimensions, Metrics.DimensionType.DEFAULT); previousMemoryOverheadDimensions = dimensions; }
if (previousMemoryOverheadDimensions != null && !previousMemoryOverheadDimensions.equals(dimensions))
private void updateMemoryOverheadMetric(int numContainers, double memoryOverhead) { final String name = "mem.system.overhead"; Dimensions dimensions = new Dimensions(Map.of("containers", Integer.toString(numContainers))); metrics.declareGauge(Metrics.APPLICATION_HOST, name, dimensions, Metrics.DimensionType.DEFAULT) .sample(memoryOverhead); if (previousMemoryOverheadDimensions != null && !previousMemoryOverheadDimensions.equals(dimensions)) metrics.deleteMetricByDimension(name, previousMemoryOverheadDimensions, Metrics.DimensionType.DEFAULT); previousMemoryOverheadDimensions = dimensions; }
/**
 * Administers a set of {@link NodeAgent}s: creates and removes agents to match the wanted set
 * of containers, schedules agent ticks spread out in time, aggregates agent and JVM metrics,
 * and coordinates freezing of all agents.
 */
class NodeAdminImpl implements NodeAdmin {

    private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
    private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3);

    private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
    private final Clock clock;
    private final Duration freezeTimeout;
    private final Duration spread;
    // The value passed to the previous setFrozen() call; used to detect transitions.
    private boolean previousWantFrozen;
    private boolean isFrozen;
    // When the current freeze convergence started, or null while unfrozen.
    private Instant startOfFreezeConvergence;
    private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();

    private final ProcMeminfoReader procMeminfoReader;
    private final Gauge jvmHeapUsed;
    private final Gauge jvmHeapFree;
    private final Gauge jvmHeapTotal;
    private final Gauge containerCount;
    private final Counter numberOfUnhandledExceptions;
    private final Metrics metrics;
    // Dimensions of the last published mem.system.overhead sample, or null before the first sample.
    private Dimensions previousMemoryOverheadDimensions = null;

    public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock, FileSystem fileSystem) {
        this(nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
             metrics, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD, new ProcMeminfoReader(fileSystem));
    }

    public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock,
                         Duration freezeTimeout, Duration spread, ProcMeminfoReader procMeminfoReader) {
        this(nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
             metrics, clock, freezeTimeout, spread, procMeminfoReader);
    }

    NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory, Metrics metrics, Clock clock,
                  Duration freezeTimeout, Duration spread, ProcMeminfoReader procMeminfoReader) {
        this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
        this.clock = clock;
        this.freezeTimeout = freezeTimeout;
        this.spread = spread;
        // Starts out frozen until told otherwise via setFrozen().
        this.previousWantFrozen = true;
        this.isFrozen = true;
        this.startOfFreezeConvergence = clock.instant();
        this.numberOfUnhandledExceptions =
                metrics.declareCounter("unhandled_exceptions", new Dimensions(Map.of("src", "node-agents")));
        this.procMeminfoReader = procMeminfoReader;
        this.jvmHeapUsed = metrics.declareGauge("mem.heap.used");
        this.jvmHeapFree = metrics.declareGauge("mem.heap.free");
        this.jvmHeapTotal = metrics.declareGauge("mem.heap.total");
        this.containerCount = metrics.declareGauge("container.count");
        this.metrics = metrics;
    }

    /** Reconciles the running node agents with the given wanted set, then (re)schedules their ticks. */
    @Override
    public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) {
        Map<String, NodeAgentContext> nodeAgentContextsByHostname = nodeAgentContexts.stream()
                .collect(Collectors.toMap(ctx -> ctx.node().id(), Function.identity()));
        // Stop and remove agents for hosts no longer in the wanted set.
        diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
                .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stopForRemoval());
        // Create and start agents for hosts newly added to the wanted set.
        diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
            NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
            naws.start();
            nodeAgentWithSchedulerByHostname.put(hostname, naws);
        });
        // Spread the agents' tick start times evenly over `spread` so they do not all run at once.
        Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1));
        Instant nextAgentStart = clock.instant();
        for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
            nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart);
            nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents);
        }
    }

    /** Samples per-container and JVM metrics; most gauges are only published when not suspended. */
    @Override
    public void updateMetrics(boolean isSuspended) {
        int numContainers = 0;
        long totalContainerMemoryBytes = 0;
        for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
            int count = nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
            if (!isSuspended) numberOfUnhandledExceptions.add(count);
            Optional<ContainerStats> containerStats = nodeAgentWithScheduler.updateContainerNodeMetrics(isSuspended);
            if (containerStats.isPresent()) {
                ++numContainers;
                totalContainerMemoryBytes += containerStats.get().getMemoryStats().getUsage();
            }
        }

        // JVM heap metrics; a GC is requested first so the numbers reflect live data.
        Runtime runtime = Runtime.getRuntime();
        runtime.gc();
        long freeMemory = runtime.freeMemory();
        long totalMemory = runtime.totalMemory();
        long usedMemory = totalMemory - freeMemory;
        jvmHeapFree.sample(freeMemory);
        jvmHeapUsed.sample(usedMemory);
        jvmHeapTotal.sample(totalMemory);
        if (!isSuspended) {
            containerCount.sample(numContainers);
            ProcMeminfo meminfo = procMeminfoReader.read();
            // Host memory in use that is not attributable to any container.
            updateMemoryOverheadMetric(numContainers, meminfo.memTotalBytes() - meminfo.memAvailableBytes() - totalContainerMemoryBytes);
        }
    }

    /**
     * Requests all node agents to (un)freeze.
     * Returns true when every agent has converged on the wanted frozen state.
     */
    @Override
    public boolean setFrozen(boolean wantFrozen) {
        if (wantFrozen != previousWantFrozen) {
            // Record when convergence towards the frozen state began; cleared when unfreezing.
            if (wantFrozen) {
                this.startOfFreezeConvergence = clock.instant();
            } else {
                this.startOfFreezeConvergence = null;
            }
            previousWantFrozen = wantFrozen;
        }
        // Converged only if no agent failed to reach the wanted state within the timeout.
        boolean allNodeAgentsConverged = parallelStreamOfNodeAgentWithScheduler()
                .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout))
                .count() == 0;
        if (wantFrozen) {
            if (allNodeAgentsConverged) isFrozen = true;
        } else isFrozen = false;
        return allNodeAgentsConverged;
    }

    @Override
    public boolean isFrozen() {
        return isFrozen;
    }

    /** Returns how long the subsystem has been converging towards frozen, or zero while unfrozen. */
    @Override
    public Duration subsystemFreezeDuration() {
        if (startOfFreezeConvergence == null) {
            return Duration.ZERO;
        } else {
            return Duration.between(startOfFreezeConvergence, clock.instant());
        }
    }

    @Override
    public void stopNodeAgentServices() {
        parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForHostSuspension);
    }

    @Override
    public void start() { }

    @Override
    public void stop() {
        parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForRemoval);
    }

    /**
     * Returns a parallel stream of NodeAgentWithScheduler.
     *
     * <p>Why not just call nodeAgentWithSchedulerByHostname.values().parallelStream()? Experiments
     * with Java 11 have shown that with 10 nodes and forEach(), there are a maximum of 3 concurrent
     * threads. With HashMap it produces 5. With List it produces 10 concurrent threads.</p>
     */
    private Stream<NodeAgentWithScheduler> parallelStreamOfNodeAgentWithScheduler() {
        return List.copyOf(nodeAgentWithSchedulerByHostname.values()).parallelStream();
    }

    /** Returns the elements of minuend that are not in subtrahend (set difference). */
    private static <T> Set<T> diff(Set<T> minuend, Set<T> subtrahend) {
        var result = new HashSet<>(minuend);
        result.removeAll(subtrahend);
        return result;
    }

    /** Couples a NodeAgent with its scheduler, delegating scheduler calls to the latter. */
    static class NodeAgentWithScheduler implements NodeAgentScheduler {
        private final NodeAgent nodeAgent;
        private final NodeAgentScheduler nodeAgentScheduler;

        private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
            this.nodeAgent = nodeAgent;
            this.nodeAgentScheduler = nodeAgentScheduler;
        }

        void start() { nodeAgent.start(currentContext()); }
        void stopForHostSuspension() { nodeAgent.stopForHostSuspension(currentContext()); }
        void stopForRemoval() { nodeAgent.stopForRemoval(currentContext()); }
        Optional<ContainerStats> updateContainerNodeMetrics(boolean isSuspended) { return nodeAgent.updateContainerNodeMetrics(currentContext(), isSuspended); }
        int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }

        @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); }
        @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
        @Override public NodeAgentContext currentContext() { return nodeAgentScheduler.currentContext(); }
    }

    @FunctionalInterface
    interface NodeAgentWithSchedulerFactory {
        NodeAgentWithScheduler create(NodeAgentContext context);
    }

    private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
        NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
        NodeAgent nodeAgent = nodeAgentFactory.create(contextManager, context);
        return new NodeAgentWithScheduler(nodeAgent, contextManager);
    }
}
/**
 * Administers a set of {@link NodeAgent}s: creates and removes agents to match the wanted set
 * of containers, schedules agent ticks spread out in time, aggregates agent and JVM metrics,
 * and coordinates freezing of all agents.
 */
class NodeAdminImpl implements NodeAdmin {

    private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
    private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3);

    private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
    private final Clock clock;
    private final Duration freezeTimeout;
    private final Duration spread;
    // The value passed to the previous setFrozen() call; used to detect transitions.
    private boolean previousWantFrozen;
    private boolean isFrozen;
    // When the current freeze convergence started, or null while unfrozen.
    private Instant startOfFreezeConvergence;
    private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();

    private final ProcMeminfoReader procMeminfoReader;
    private final Gauge jvmHeapUsed;
    private final Gauge jvmHeapFree;
    private final Gauge jvmHeapTotal;
    private final Gauge containerCount;
    private final Counter numberOfUnhandledExceptions;
    private final Metrics metrics;
    // Dimensions of the last published mem.system.overhead sample, or null before the first sample.
    private Dimensions previousMemoryOverheadDimensions = null;

    public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock, FileSystem fileSystem) {
        this(nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
             metrics, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD, new ProcMeminfoReader(fileSystem));
    }

    public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock,
                         Duration freezeTimeout, Duration spread, ProcMeminfoReader procMeminfoReader) {
        this(nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
             metrics, clock, freezeTimeout, spread, procMeminfoReader);
    }

    NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory, Metrics metrics, Clock clock,
                  Duration freezeTimeout, Duration spread, ProcMeminfoReader procMeminfoReader) {
        this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
        this.clock = clock;
        this.freezeTimeout = freezeTimeout;
        this.spread = spread;
        // Starts out frozen until told otherwise via setFrozen().
        this.previousWantFrozen = true;
        this.isFrozen = true;
        this.startOfFreezeConvergence = clock.instant();
        this.numberOfUnhandledExceptions =
                metrics.declareCounter("unhandled_exceptions", new Dimensions(Map.of("src", "node-agents")));
        this.procMeminfoReader = procMeminfoReader;
        this.jvmHeapUsed = metrics.declareGauge("mem.heap.used");
        this.jvmHeapFree = metrics.declareGauge("mem.heap.free");
        this.jvmHeapTotal = metrics.declareGauge("mem.heap.total");
        this.containerCount = metrics.declareGauge("container.count");
        this.metrics = metrics;
    }

    /** Reconciles the running node agents with the given wanted set, then (re)schedules their ticks. */
    @Override
    public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) {
        Map<String, NodeAgentContext> nodeAgentContextsByHostname = nodeAgentContexts.stream()
                .collect(Collectors.toMap(ctx -> ctx.node().id(), Function.identity()));
        // Stop and remove agents for hosts no longer in the wanted set.
        diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
                .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stopForRemoval());
        // Create and start agents for hosts newly added to the wanted set.
        diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
            NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
            naws.start();
            nodeAgentWithSchedulerByHostname.put(hostname, naws);
        });
        // Spread the agents' tick start times evenly over `spread` so they do not all run at once.
        Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1));
        Instant nextAgentStart = clock.instant();
        for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
            nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart);
            nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents);
        }
    }

    /** Samples per-container and JVM metrics; most gauges are only published when not suspended. */
    @Override
    public void updateMetrics(boolean isSuspended) {
        int numContainers = 0;
        long totalContainerMemoryBytes = 0;
        for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
            int count = nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
            if (!isSuspended) numberOfUnhandledExceptions.add(count);
            Optional<ContainerStats> containerStats = nodeAgentWithScheduler.updateContainerNodeMetrics(isSuspended);
            if (containerStats.isPresent()) {
                ++numContainers;
                totalContainerMemoryBytes += containerStats.get().getMemoryStats().getUsage();
            }
        }

        // JVM heap metrics; a GC is requested first so the numbers reflect live data.
        Runtime runtime = Runtime.getRuntime();
        runtime.gc();
        long freeMemory = runtime.freeMemory();
        long totalMemory = runtime.totalMemory();
        long usedMemory = totalMemory - freeMemory;
        jvmHeapFree.sample(freeMemory);
        jvmHeapUsed.sample(usedMemory);
        jvmHeapTotal.sample(totalMemory);
        if (!isSuspended) {
            containerCount.sample(numContainers);
            ProcMeminfo meminfo = procMeminfoReader.read();
            // Host memory in use that is not attributable to any container.
            updateMemoryOverheadMetric(numContainers, meminfo.memTotalBytes() - meminfo.memAvailableBytes() - totalContainerMemoryBytes);
        }
    }

    /**
     * Requests all node agents to (un)freeze.
     * Returns true when every agent has converged on the wanted frozen state.
     */
    @Override
    public boolean setFrozen(boolean wantFrozen) {
        if (wantFrozen != previousWantFrozen) {
            // Record when convergence towards the frozen state began; cleared when unfreezing.
            if (wantFrozen) {
                this.startOfFreezeConvergence = clock.instant();
            } else {
                this.startOfFreezeConvergence = null;
            }
            previousWantFrozen = wantFrozen;
        }
        // Converged only if no agent failed to reach the wanted state within the timeout.
        boolean allNodeAgentsConverged = parallelStreamOfNodeAgentWithScheduler()
                .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout))
                .count() == 0;
        if (wantFrozen) {
            if (allNodeAgentsConverged) isFrozen = true;
        } else isFrozen = false;
        return allNodeAgentsConverged;
    }

    @Override
    public boolean isFrozen() {
        return isFrozen;
    }

    /** Returns how long the subsystem has been converging towards frozen, or zero while unfrozen. */
    @Override
    public Duration subsystemFreezeDuration() {
        if (startOfFreezeConvergence == null) {
            return Duration.ZERO;
        } else {
            return Duration.between(startOfFreezeConvergence, clock.instant());
        }
    }

    @Override
    public void stopNodeAgentServices() {
        parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForHostSuspension);
    }

    @Override
    public void start() { }

    @Override
    public void stop() {
        parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForRemoval);
    }

    /**
     * Returns a parallel stream of NodeAgentWithScheduler.
     *
     * <p>Why not just call nodeAgentWithSchedulerByHostname.values().parallelStream()? Experiments
     * with Java 11 have shown that with 10 nodes and forEach(), there are a maximum of 3 concurrent
     * threads. With HashMap it produces 5. With List it produces 10 concurrent threads.</p>
     */
    private Stream<NodeAgentWithScheduler> parallelStreamOfNodeAgentWithScheduler() {
        return List.copyOf(nodeAgentWithSchedulerByHostname.values()).parallelStream();
    }

    /** Returns the elements of minuend that are not in subtrahend (set difference). */
    private static <T> Set<T> diff(Set<T> minuend, Set<T> subtrahend) {
        var result = new HashSet<>(minuend);
        result.removeAll(subtrahend);
        return result;
    }

    /** Couples a NodeAgent with its scheduler, delegating scheduler calls to the latter. */
    static class NodeAgentWithScheduler implements NodeAgentScheduler {
        private final NodeAgent nodeAgent;
        private final NodeAgentScheduler nodeAgentScheduler;

        private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
            this.nodeAgent = nodeAgent;
            this.nodeAgentScheduler = nodeAgentScheduler;
        }

        void start() { nodeAgent.start(currentContext()); }
        void stopForHostSuspension() { nodeAgent.stopForHostSuspension(currentContext()); }
        void stopForRemoval() { nodeAgent.stopForRemoval(currentContext()); }
        Optional<ContainerStats> updateContainerNodeMetrics(boolean isSuspended) { return nodeAgent.updateContainerNodeMetrics(currentContext(), isSuspended); }
        int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }

        @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); }
        @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
        @Override public NodeAgentContext currentContext() { return nodeAgentScheduler.currentContext(); }
    }

    @FunctionalInterface
    interface NodeAgentWithSchedulerFactory {
        NodeAgentWithScheduler create(NodeAgentContext context);
    }

    private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
        NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
        NodeAgent nodeAgent = nodeAgentFactory.create(contextManager, context);
        return new NodeAgentWithScheduler(nodeAgent, contextManager);
    }
}
Doh! :facepalm:
private void updateMemoryOverheadMetric(int numContainers, double memoryOverhead) {
    // Gauge for host memory in use that is not accounted for by any container.
    final String name = "mem.system.overhead";
    var currentDimensions = new Dimensions(Map.of("containers", Integer.toString(numContainers)));
    metrics.declareGauge(Metrics.APPLICATION_HOST, name, currentDimensions, Metrics.DimensionType.DEFAULT)
           .sample(memoryOverhead);

    // The container count is part of the dimensions, so when it changes we must delete the
    // previously published variant to avoid leaving a stale duplicate time series behind.
    boolean dimensionsChanged = previousMemoryOverheadDimensions != null
            && !previousMemoryOverheadDimensions.equals(currentDimensions);
    if (dimensionsChanged)
        metrics.deleteMetricByDimension(name, previousMemoryOverheadDimensions, Metrics.DimensionType.DEFAULT);
    previousMemoryOverheadDimensions = currentDimensions;
}
if (previousMemoryOverheadDimensions != null && !previousMemoryOverheadDimensions.equals(dimensions))
private void updateMemoryOverheadMetric(int numContainers, double memoryOverhead) {
    // Publishes the host's unattributed ("system overhead") memory usage as a gauge.
    final String name = "mem.system.overhead";
    var newDimensions = new Dimensions(Map.of("containers", Integer.toString(numContainers)));
    metrics.declareGauge(Metrics.APPLICATION_HOST, name, newDimensions, Metrics.DimensionType.DEFAULT)
           .sample(memoryOverhead);

    // If the "containers" dimension value changed since the last sample, remove the old
    // dimension variant so only the current one remains published.
    if (previousMemoryOverheadDimensions != null && !previousMemoryOverheadDimensions.equals(newDimensions)) {
        metrics.deleteMetricByDimension(name, previousMemoryOverheadDimensions, Metrics.DimensionType.DEFAULT);
    }
    previousMemoryOverheadDimensions = newDimensions;
}
/**
 * Administers a set of {@link NodeAgent}s: creates and removes agents to match the wanted set
 * of containers, schedules agent ticks spread out in time, aggregates agent and JVM metrics,
 * and coordinates freezing of all agents.
 */
class NodeAdminImpl implements NodeAdmin {

    private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
    private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3);

    private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
    private final Clock clock;
    private final Duration freezeTimeout;
    private final Duration spread;
    // The value passed to the previous setFrozen() call; used to detect transitions.
    private boolean previousWantFrozen;
    private boolean isFrozen;
    // When the current freeze convergence started, or null while unfrozen.
    private Instant startOfFreezeConvergence;
    private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();

    private final ProcMeminfoReader procMeminfoReader;
    private final Gauge jvmHeapUsed;
    private final Gauge jvmHeapFree;
    private final Gauge jvmHeapTotal;
    private final Gauge containerCount;
    private final Counter numberOfUnhandledExceptions;
    private final Metrics metrics;
    // Dimensions of the last published mem.system.overhead sample, or null before the first sample.
    private Dimensions previousMemoryOverheadDimensions = null;

    public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock, FileSystem fileSystem) {
        this(nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
             metrics, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD, new ProcMeminfoReader(fileSystem));
    }

    public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock,
                         Duration freezeTimeout, Duration spread, ProcMeminfoReader procMeminfoReader) {
        this(nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
             metrics, clock, freezeTimeout, spread, procMeminfoReader);
    }

    NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory, Metrics metrics, Clock clock,
                  Duration freezeTimeout, Duration spread, ProcMeminfoReader procMeminfoReader) {
        this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
        this.clock = clock;
        this.freezeTimeout = freezeTimeout;
        this.spread = spread;
        // Starts out frozen until told otherwise via setFrozen().
        this.previousWantFrozen = true;
        this.isFrozen = true;
        this.startOfFreezeConvergence = clock.instant();
        this.numberOfUnhandledExceptions =
                metrics.declareCounter("unhandled_exceptions", new Dimensions(Map.of("src", "node-agents")));
        this.procMeminfoReader = procMeminfoReader;
        this.jvmHeapUsed = metrics.declareGauge("mem.heap.used");
        this.jvmHeapFree = metrics.declareGauge("mem.heap.free");
        this.jvmHeapTotal = metrics.declareGauge("mem.heap.total");
        this.containerCount = metrics.declareGauge("container.count");
        this.metrics = metrics;
    }

    /** Reconciles the running node agents with the given wanted set, then (re)schedules their ticks. */
    @Override
    public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) {
        Map<String, NodeAgentContext> nodeAgentContextsByHostname = nodeAgentContexts.stream()
                .collect(Collectors.toMap(ctx -> ctx.node().id(), Function.identity()));
        // Stop and remove agents for hosts no longer in the wanted set.
        diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
                .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stopForRemoval());
        // Create and start agents for hosts newly added to the wanted set.
        diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
            NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
            naws.start();
            nodeAgentWithSchedulerByHostname.put(hostname, naws);
        });
        // Spread the agents' tick start times evenly over `spread` so they do not all run at once.
        Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1));
        Instant nextAgentStart = clock.instant();
        for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
            nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart);
            nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents);
        }
    }

    /** Samples per-container and JVM metrics; most gauges are only published when not suspended. */
    @Override
    public void updateMetrics(boolean isSuspended) {
        int numContainers = 0;
        long totalContainerMemoryBytes = 0;
        for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
            int count = nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
            if (!isSuspended) numberOfUnhandledExceptions.add(count);
            Optional<ContainerStats> containerStats = nodeAgentWithScheduler.updateContainerNodeMetrics(isSuspended);
            if (containerStats.isPresent()) {
                ++numContainers;
                totalContainerMemoryBytes += containerStats.get().getMemoryStats().getUsage();
            }
        }

        // JVM heap metrics; a GC is requested first so the numbers reflect live data.
        Runtime runtime = Runtime.getRuntime();
        runtime.gc();
        long freeMemory = runtime.freeMemory();
        long totalMemory = runtime.totalMemory();
        long usedMemory = totalMemory - freeMemory;
        jvmHeapFree.sample(freeMemory);
        jvmHeapUsed.sample(usedMemory);
        jvmHeapTotal.sample(totalMemory);
        if (!isSuspended) {
            containerCount.sample(numContainers);
            ProcMeminfo meminfo = procMeminfoReader.read();
            // Host memory in use that is not attributable to any container.
            updateMemoryOverheadMetric(numContainers, meminfo.memTotalBytes() - meminfo.memAvailableBytes() - totalContainerMemoryBytes);
        }
    }

    /**
     * Requests all node agents to (un)freeze.
     * Returns true when every agent has converged on the wanted frozen state.
     */
    @Override
    public boolean setFrozen(boolean wantFrozen) {
        if (wantFrozen != previousWantFrozen) {
            // Record when convergence towards the frozen state began; cleared when unfreezing.
            if (wantFrozen) {
                this.startOfFreezeConvergence = clock.instant();
            } else {
                this.startOfFreezeConvergence = null;
            }
            previousWantFrozen = wantFrozen;
        }
        // Converged only if no agent failed to reach the wanted state within the timeout.
        boolean allNodeAgentsConverged = parallelStreamOfNodeAgentWithScheduler()
                .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout))
                .count() == 0;
        if (wantFrozen) {
            if (allNodeAgentsConverged) isFrozen = true;
        } else isFrozen = false;
        return allNodeAgentsConverged;
    }

    @Override
    public boolean isFrozen() {
        return isFrozen;
    }

    /** Returns how long the subsystem has been converging towards frozen, or zero while unfrozen. */
    @Override
    public Duration subsystemFreezeDuration() {
        if (startOfFreezeConvergence == null) {
            return Duration.ZERO;
        } else {
            return Duration.between(startOfFreezeConvergence, clock.instant());
        }
    }

    @Override
    public void stopNodeAgentServices() {
        parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForHostSuspension);
    }

    @Override
    public void start() { }

    @Override
    public void stop() {
        parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForRemoval);
    }

    /**
     * Returns a parallel stream of NodeAgentWithScheduler.
     *
     * <p>Why not just call nodeAgentWithSchedulerByHostname.values().parallelStream()? Experiments
     * with Java 11 have shown that with 10 nodes and forEach(), there are a maximum of 3 concurrent
     * threads. With HashMap it produces 5. With List it produces 10 concurrent threads.</p>
     */
    private Stream<NodeAgentWithScheduler> parallelStreamOfNodeAgentWithScheduler() {
        return List.copyOf(nodeAgentWithSchedulerByHostname.values()).parallelStream();
    }

    /** Returns the elements of minuend that are not in subtrahend (set difference). */
    private static <T> Set<T> diff(Set<T> minuend, Set<T> subtrahend) {
        var result = new HashSet<>(minuend);
        result.removeAll(subtrahend);
        return result;
    }

    /** Couples a NodeAgent with its scheduler, delegating scheduler calls to the latter. */
    static class NodeAgentWithScheduler implements NodeAgentScheduler {
        private final NodeAgent nodeAgent;
        private final NodeAgentScheduler nodeAgentScheduler;

        private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
            this.nodeAgent = nodeAgent;
            this.nodeAgentScheduler = nodeAgentScheduler;
        }

        void start() { nodeAgent.start(currentContext()); }
        void stopForHostSuspension() { nodeAgent.stopForHostSuspension(currentContext()); }
        void stopForRemoval() { nodeAgent.stopForRemoval(currentContext()); }
        Optional<ContainerStats> updateContainerNodeMetrics(boolean isSuspended) { return nodeAgent.updateContainerNodeMetrics(currentContext(), isSuspended); }
        int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }

        @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); }
        @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
        @Override public NodeAgentContext currentContext() { return nodeAgentScheduler.currentContext(); }
    }

    @FunctionalInterface
    interface NodeAgentWithSchedulerFactory {
        NodeAgentWithScheduler create(NodeAgentContext context);
    }

    private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
        NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
        NodeAgent nodeAgent = nodeAgentFactory.create(contextManager, context);
        return new NodeAgentWithScheduler(nodeAgent, contextManager);
    }
}
/**
 * Administers a set of {@link NodeAgent}s: creates and removes agents to match the wanted set
 * of containers, schedules agent ticks spread out in time, aggregates agent and JVM metrics,
 * and coordinates freezing of all agents.
 */
class NodeAdminImpl implements NodeAdmin {

    private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
    private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3);

    private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
    private final Clock clock;
    private final Duration freezeTimeout;
    private final Duration spread;
    // The value passed to the previous setFrozen() call; used to detect transitions.
    private boolean previousWantFrozen;
    private boolean isFrozen;
    // When the current freeze convergence started, or null while unfrozen.
    private Instant startOfFreezeConvergence;
    private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();

    private final ProcMeminfoReader procMeminfoReader;
    private final Gauge jvmHeapUsed;
    private final Gauge jvmHeapFree;
    private final Gauge jvmHeapTotal;
    private final Gauge containerCount;
    private final Counter numberOfUnhandledExceptions;
    private final Metrics metrics;
    // Dimensions of the last published mem.system.overhead sample, or null before the first sample.
    private Dimensions previousMemoryOverheadDimensions = null;

    public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock, FileSystem fileSystem) {
        this(nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
             metrics, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD, new ProcMeminfoReader(fileSystem));
    }

    public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock,
                         Duration freezeTimeout, Duration spread, ProcMeminfoReader procMeminfoReader) {
        this(nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
             metrics, clock, freezeTimeout, spread, procMeminfoReader);
    }

    NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory, Metrics metrics, Clock clock,
                  Duration freezeTimeout, Duration spread, ProcMeminfoReader procMeminfoReader) {
        this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
        this.clock = clock;
        this.freezeTimeout = freezeTimeout;
        this.spread = spread;
        // Starts out frozen until told otherwise via setFrozen().
        this.previousWantFrozen = true;
        this.isFrozen = true;
        this.startOfFreezeConvergence = clock.instant();
        this.numberOfUnhandledExceptions =
                metrics.declareCounter("unhandled_exceptions", new Dimensions(Map.of("src", "node-agents")));
        this.procMeminfoReader = procMeminfoReader;
        this.jvmHeapUsed = metrics.declareGauge("mem.heap.used");
        this.jvmHeapFree = metrics.declareGauge("mem.heap.free");
        this.jvmHeapTotal = metrics.declareGauge("mem.heap.total");
        this.containerCount = metrics.declareGauge("container.count");
        this.metrics = metrics;
    }

    /** Reconciles the running node agents with the given wanted set, then (re)schedules their ticks. */
    @Override
    public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) {
        Map<String, NodeAgentContext> nodeAgentContextsByHostname = nodeAgentContexts.stream()
                .collect(Collectors.toMap(ctx -> ctx.node().id(), Function.identity()));
        // Stop and remove agents for hosts no longer in the wanted set.
        diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
                .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stopForRemoval());
        // Create and start agents for hosts newly added to the wanted set.
        diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
            NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
            naws.start();
            nodeAgentWithSchedulerByHostname.put(hostname, naws);
        });
        // Spread the agents' tick start times evenly over `spread` so they do not all run at once.
        Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1));
        Instant nextAgentStart = clock.instant();
        for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
            nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart);
            nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents);
        }
    }

    /** Samples per-container and JVM metrics; most gauges are only published when not suspended. */
    @Override
    public void updateMetrics(boolean isSuspended) {
        int numContainers = 0;
        long totalContainerMemoryBytes = 0;
        for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
            int count = nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
            if (!isSuspended) numberOfUnhandledExceptions.add(count);
            Optional<ContainerStats> containerStats = nodeAgentWithScheduler.updateContainerNodeMetrics(isSuspended);
            if (containerStats.isPresent()) {
                ++numContainers;
                totalContainerMemoryBytes += containerStats.get().getMemoryStats().getUsage();
            }
        }

        // JVM heap metrics; a GC is requested first so the numbers reflect live data.
        Runtime runtime = Runtime.getRuntime();
        runtime.gc();
        long freeMemory = runtime.freeMemory();
        long totalMemory = runtime.totalMemory();
        long usedMemory = totalMemory - freeMemory;
        jvmHeapFree.sample(freeMemory);
        jvmHeapUsed.sample(usedMemory);
        jvmHeapTotal.sample(totalMemory);
        if (!isSuspended) {
            containerCount.sample(numContainers);
            ProcMeminfo meminfo = procMeminfoReader.read();
            // Host memory in use that is not attributable to any container.
            updateMemoryOverheadMetric(numContainers, meminfo.memTotalBytes() - meminfo.memAvailableBytes() - totalContainerMemoryBytes);
        }
    }

    /**
     * Requests all node agents to (un)freeze.
     * Returns true when every agent has converged on the wanted frozen state.
     */
    @Override
    public boolean setFrozen(boolean wantFrozen) {
        if (wantFrozen != previousWantFrozen) {
            // Record when convergence towards the frozen state began; cleared when unfreezing.
            if (wantFrozen) {
                this.startOfFreezeConvergence = clock.instant();
            } else {
                this.startOfFreezeConvergence = null;
            }
            previousWantFrozen = wantFrozen;
        }
        // Converged only if no agent failed to reach the wanted state within the timeout.
        boolean allNodeAgentsConverged = parallelStreamOfNodeAgentWithScheduler()
                .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout))
                .count() == 0;
        if (wantFrozen) {
            if (allNodeAgentsConverged) isFrozen = true;
        } else isFrozen = false;
        return allNodeAgentsConverged;
    }

    @Override
    public boolean isFrozen() {
        return isFrozen;
    }

    /** Returns how long the subsystem has been converging towards frozen, or zero while unfrozen. */
    @Override
    public Duration subsystemFreezeDuration() {
        if (startOfFreezeConvergence == null) {
            return Duration.ZERO;
        } else {
            return Duration.between(startOfFreezeConvergence, clock.instant());
        }
    }

    @Override
    public void stopNodeAgentServices() {
        parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForHostSuspension);
    }

    @Override
    public void start() { }

    @Override
    public void stop() {
        parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForRemoval);
    }

    /**
     * Returns a parallel stream of NodeAgentWithScheduler.
     *
     * <p>Why not just call nodeAgentWithSchedulerByHostname.values().parallelStream()? Experiments
     * with Java 11 have shown that with 10 nodes and forEach(), there are a maximum of 3 concurrent
     * threads. With HashMap it produces 5. With List it produces 10 concurrent threads.</p>
     */
    private Stream<NodeAgentWithScheduler> parallelStreamOfNodeAgentWithScheduler() {
        return List.copyOf(nodeAgentWithSchedulerByHostname.values()).parallelStream();
    }

    /** Returns the elements of minuend that are not in subtrahend (set difference). */
    private static <T> Set<T> diff(Set<T> minuend, Set<T> subtrahend) {
        var result = new HashSet<>(minuend);
        result.removeAll(subtrahend);
        return result;
    }

    /** Couples a NodeAgent with its scheduler, delegating scheduler calls to the latter. */
    static class NodeAgentWithScheduler implements NodeAgentScheduler {
        private final NodeAgent nodeAgent;
        private final NodeAgentScheduler nodeAgentScheduler;

        private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
            this.nodeAgent = nodeAgent;
            this.nodeAgentScheduler = nodeAgentScheduler;
        }

        void start() { nodeAgent.start(currentContext()); }
        void stopForHostSuspension() { nodeAgent.stopForHostSuspension(currentContext()); }
        void stopForRemoval() { nodeAgent.stopForRemoval(currentContext()); }
        Optional<ContainerStats> updateContainerNodeMetrics(boolean isSuspended) { return nodeAgent.updateContainerNodeMetrics(currentContext(), isSuspended); }
        int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }

        @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); }
        @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
        @Override public NodeAgentContext currentContext() { return nodeAgentScheduler.currentContext(); }
    }

    @FunctionalInterface
    interface NodeAgentWithSchedulerFactory {
        NodeAgentWithScheduler create(NodeAgentContext context);
    }

    private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
        NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
        NodeAgent nodeAgent = nodeAgentFactory.create(contextManager, context);
        return new NodeAgentWithScheduler(nodeAgent, contextManager);
    }
}
Should we perhaps generate a UUID here, and place with both the response, and in the log, to make it easier to find relevant log entries when a user asks what happened?
public HttpResponse auditAndHandle(HttpRequest request) { try { Path path = new Path(request.getUri()); return switch (request.getMethod()) { case GET: yield handleGET(path, request); case PUT: yield handlePUT(path, request); case POST: yield handlePOST(path, request); case PATCH: yield handlePATCH(path, request); case DELETE: yield handleDELETE(path, request); case OPTIONS: yield handleOPTIONS(); default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); }; } catch (RestApiException.Forbidden e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (RestApiException.Unauthorized e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return switch (e.code()) { case NOT_FOUND: yield ErrorResponse.notFoundError(Exceptions.toMessageString(e)); case ACTIVATION_CONFLICT: yield new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e)); case INTERNAL_SERVER_ERROR: yield ErrorResponse.internalServerError(Exceptions.toMessageString(e)); default: yield new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e)); }; } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError("Unexpected error occurred"); } }
return ErrorResponse.internalServerError("Unexpected error occurred");
public HttpResponse auditAndHandle(HttpRequest request) { try { Path path = new Path(request.getUri()); return switch (request.getMethod()) { case GET: yield handleGET(path, request); case PUT: yield handlePUT(path, request); case POST: yield handlePOST(path, request); case PATCH: yield handlePATCH(path, request); case DELETE: yield handleDELETE(path, request); case OPTIONS: yield handleOPTIONS(); default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); }; } catch (RestApiException.Forbidden e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (RestApiException.Unauthorized e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return switch (e.code()) { case NOT_FOUND: yield ErrorResponse.notFoundError(Exceptions.toMessageString(e)); case ACTIVATION_CONFLICT: yield new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e)); case INTERNAL_SERVER_ERROR: yield ErrorResponses.logThrowing(request, log, e); default: yield new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e)); }; } catch (RuntimeException e) { return ErrorResponses.logThrowing(request, log, e); } }
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx, controller.auditLogger()); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if 
(path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), 
path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) 
return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), 
path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant")); if 
(path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), 
path.get("application"), path.get("instance"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyResponse response = new EmptyResponse(); response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); List<Application> applications = controller.applications().asList(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).collect(toList()), request); return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return recurseOverTenants(request) ? 
recursiveRoot(request) : new ResourceResponse(request, "tenant"); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request)) .map(tenant -> tenant(tenant, request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request); return new SlimeJsonResponse(slime); } private HttpResponse accessRequests(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var accessControlService = controller.serviceRegistry().accessControlService(); var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant); var managedAccess = accessControlService.getManagedAccess(tenant); var slime = new Slime(); var cursor = slime.setObject(); cursor.setBool("managedAccess", managedAccess); accessRoleInformation.getPendingRequest() .ifPresent(membershipRequest -> { var requestCursor = cursor.setObject("pendingRequest"); requestCursor.setString("requestTime", membershipRequest.getCreationTime()); requestCursor.setString("reason", membershipRequest.getReason()); }); var auditLogCursor = cursor.setArray("auditLog"); accessRoleInformation.getAuditLog() .forEach(auditLogEntry -> { var entryCursor = auditLogCursor.addObject(); entryCursor.setString("created", 
auditLogEntry.getCreationTime());
                entryCursor.setString("approver", auditLogEntry.getApprover());
                entryCursor.setString("reason", auditLogEntry.getReason());
                entryCursor.setString("status", auditLogEntry.getAction());
            });
    return new SlimeJsonResponse(slime);
}

// Files an ssh access request for a cloud tenant; restricted to operators.
private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
    if (!isOperator(request)) {
        return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
    }
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only request access for cloud tenants");
    controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
    return new MessageResponse("OK");
}

// Approves (or rejects) a pending ssh access request for a cloud tenant.
private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
    var inspector = toSlime(request.getData()).get();
    // Expiry defaults to 24 hours from now when not given in the request body.
    var expiry = inspector.field("expiry").valid() ?
Instant.ofEpochMilli(inspector.field("expiry").asLong()) :
            Instant.now().plus(1, ChronoUnit.DAYS);
    var approve = inspector.field("approve").asBool();
    controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
    return new MessageResponse("OK");
}

// Enables managed access for a cloud tenant.
private HttpResponse addManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, true);
}

// Disables managed access for a cloud tenant.
private HttpResponse removeManagedAccess(String tenantName) {
    return setManagedAccess(tenantName, false);
}

// Sets the managed-access flag for a cloud tenant and echoes the new value.
private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
    var tenant = TenantName.from(tenantName);
    if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
        return ErrorResponse.badRequest("Can only set access privileges for cloud tenants"); // fixed typo: was "privel"
    controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
    var slime = new Slime();
    slime.setObject().setBool("managedAccess", managedAccess);
    return new SlimeJsonResponse(slime);
}

// Returns the stored tenant info for a cloud tenant; 404 otherwise.
private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}

// Applies the handler to the named tenant when it is a cloud tenant; 404 otherwise.
private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
    return controller.tenants().get(TenantName.from(tenantName))
                     .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                     .map(tenant -> handler.apply((CloudTenant) tenant))
                     .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}

// Serializes the complete tenant info document; empty info yields an empty JSON object.
private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
    Slime slime = new Slime();
    Cursor infoCursor = slime.setObject();
    if (!info.isEmpty()) {
infoCursor.setString("name", info.name());
        infoCursor.setString("email", info.email());
        infoCursor.setString("website", info.website());
        infoCursor.setString("contactName", info.contact().name());
        infoCursor.setString("contactEmail", info.contact().email());
        toSlime(info.address(), infoCursor);
        toSlime(info.billingContact(), infoCursor);
        toSlime(info.contacts(), infoCursor);
    }
    return new SlimeJsonResponse(slime);
}

// Renders the "profile" view of tenant info: contact, company name, website and address.
private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
    var slime = new Slime();
    var root = slime.setObject();
    var info = cloudTenant.info();
    if (!info.isEmpty()) {
        var contact = root.setObject("contact");
        contact.setString("name", info.contact().name());
        contact.setString("email", info.contact().email());
        var tenant = root.setObject("tenant");
        tenant.setString("company", info.name());
        tenant.setString("website", info.website());
        toSlime(info.address(), root);
    }
    return new SlimeJsonResponse(slime);
}

// Parses the request body and applies the handler to the named cloud tenant; 404 otherwise.
private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
    return controller.tenants().get(tenantName)
            .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
            .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}

// Merges the posted profile fields into the stored tenant info; absent fields keep their old values.
private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var mergedContact = TenantContact.empty()
            .withName(getString(inspector.field("contact").field("name"), info.contact().name()))
            .withEmail(getString(inspector.field("contact").field("email"), info.contact().email()));
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
    var mergedInfo = info
            .withName(getString(inspector.field("tenant").field("name"), info.name()))
            .withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
            .withContact(mergedContact)
.withAddress(mergedAddress);
    validateMergedTenantInfo(mergedInfo);
    // Persist under the tenant lock to avoid losing concurrent updates.
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}

// Renders the billing view of tenant info: billing contact and its address.
private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) {
    var slime = new Slime();
    var root = slime.setObject();
    var info = cloudTenant.info();
    if (!info.isEmpty()) {
        var billingContact = info.billingContact();
        var contact = root.setObject("contact");
        contact.setString("name", billingContact.contact().name());
        contact.setString("email", billingContact.contact().email());
        contact.setString("phone", billingContact.contact().phone());
        toSlime(billingContact.address(), root);
    }
    return new SlimeJsonResponse(slime);
}

// Merges the posted billing contact and address into the stored tenant info.
private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) {
    var info = cloudTenant.info();
    var contact = info.billingContact().contact();
    var address = info.billingContact().address();
    var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact);
    var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address());
    var mergedBilling = info.billingContact()
            .withContact(mergedContact)
            .withAddress(mergedAddress);
    var mergedInfo = info.withBilling(mergedBilling);
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}

// Lists the notification contacts stored for the tenant.
private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) {
    var slime = new Slime();
    var root = slime.setObject();
    toSlime(cloudTenant.info().contacts(), root);
    return new SlimeJsonResponse(slime);
}

// Replaces the tenant's contact list with the posted one.
private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) {
    var mergedInfo = cloudTenant.info()
.withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts()));
    controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}

// Rejects merged tenant info with a missing contact name, a missing or malformed
// contact email, or a website that is not a parseable URL.
private void validateMergedTenantInfo(TenantInfo mergedInfo) {
    if (mergedInfo.contact().name().isBlank()) {
        throw new IllegalArgumentException("'contactName' cannot be empty");
    }
    if (mergedInfo.contact().email().isBlank()) {
        throw new IllegalArgumentException("'contactEmail' cannot be empty");
    }
    if (! mergedInfo.contact().email().contains("@")) {
        throw new IllegalArgumentException("'contactEmail' needs to be an email address");
    }
    if (! mergedInfo.website().isBlank()) {
        try {
            new URL(mergedInfo.website());
        } catch (MalformedURLException e) {
            throw new IllegalArgumentException("'website' needs to be a valid address");
        }
    }
}

// Serializes a tenant address; omitted entirely when empty.
private void toSlime(TenantAddress address, Cursor parentCursor) {
    if (address.isEmpty()) return;
    Cursor addressCursor = parentCursor.setObject("address");
    addressCursor.setString("addressLines", address.address());
    addressCursor.setString("postalCodeOrZip", address.code());
    addressCursor.setString("city", address.city());
    addressCursor.setString("stateRegionProvince", address.region());
    addressCursor.setString("country", address.country());
}

// Serializes a billing contact; omitted entirely when empty.
private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
    if (billingContact.isEmpty()) return;
    Cursor addressCursor = parentCursor.setObject("billingContact");
    addressCursor.setString("name", billingContact.contact().name());
    addressCursor.setString("email", billingContact.contact().email());
    addressCursor.setString("phone", billingContact.contact().phone());
    toSlime(billingContact.address(), addressCursor);
}

// Serializes the tenant's contact list; only email contacts are implemented.
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
    Cursor contactsCursor = parentCursor.setArray("contacts");
contacts.all().forEach(contact -> {
        Cursor contactCursor = contactsCursor.addObject();
        Cursor audiencesArray = contactCursor.setArray("audiences");
        contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
        switch (contact.type()) {
            case EMAIL:
                var email = (TenantContacts.EmailContact) contact;
                contactCursor.setString("email", email.email());
                return; // returns from the lambda, i.e. continues with the next contact
            default:
                throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
        }
    });
}

// Maps the wire name of a contact audience to the enum; rejects unknown values.
private static TenantContacts.Audience fromAudience(String value) {
    return switch (value) {
        case "tenant": yield TenantContacts.Audience.TENANT;
        case "notifications": yield TenantContacts.Audience.NOTIFICATIONS;
        default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
    };
}

// Maps a contact audience enum to its wire name (inverse of fromAudience).
private static String toAudience(TenantContacts.Audience audience) {
    return switch (audience) {
        case TENANT: yield "tenant";
        case NOTIFICATIONS: yield "notifications";
    };
}

// PUTs the complete tenant info document for a cloud tenant; 404 otherwise.
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
    return controller.tenants().get(TenantName.from(tenantName))
            .filter(tenant -> tenant.type() == Tenant.Type.cloud)
            .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request))
            .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
}

// Returns the trimmed field value when the field is present, otherwise the given default.
// NOTE(review): parameter name "defaultVale" is a typo for "defaultValue".
private String getString(Inspector field, String defaultVale) {
    return field.valid() ?
field.asString().trim() : defaultVale;
}

// Merges the posted (flat) tenant info document over the stored one, validates, and persists it.
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    TenantContact mergedContact = TenantContact.empty()
            .withName(getString(insp.field("contactName"), oldInfo.contact().name()))
            .withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
    TenantInfo mergedInfo = TenantInfo.empty()
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withContact(mergedContact)
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
            .withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
    validateMergedTenantInfo(mergedInfo);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}

// Merges posted address fields over the old address. The result must be either
// completely blank or completely filled in.
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
    if (!insp.valid()) return oldAddress;
    TenantAddress address = TenantAddress.empty()
            .withCountry(getString(insp.field("country"), oldAddress.country()))
            .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
            .withCity(getString(insp.field("city"), oldAddress.city()))
            .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
            .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
    List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region());
    // Accept all-blank (no address) or all-set; anything in between is an error.
    if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
        return address;
    throw new
IllegalArgumentException("All address fields must be set");
}

// Merges posted contact fields over the old contact; a non-blank email must contain '@'.
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
    if (!insp.valid()) return oldContact;
    String email = getString(insp.field("email"), oldContact.email());
    if (!email.isBlank() && !email.contains("@")) {
        throw new IllegalArgumentException("'email' needs to be an email address");
    }
    return TenantContact.empty()
            .withName(getString(insp.field("name"), oldContact.name()))
            .withEmail(getString(insp.field("email"), oldContact.email()))
            .withPhone(getString(insp.field("phone"), oldContact.phone()));
}

// Merges posted billing contact and address over the old billing info.
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
    if (!insp.valid()) return oldContact;
    return TenantBilling.empty()
            .withContact(updateTenantInfoContact(insp, oldContact.contact()))
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
}

// Replaces the contact list with the posted one; every entry must have an email containing '@'.
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
    if (!insp.valid()) return oldContacts;
    List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
        String email = inspector.field("email").asString().trim();
        List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
                .map(audience -> fromAudience(audience.asString()))
                .toList();
        if (!email.contains("@")) {
            throw new IllegalArgumentException("'email' needs to be an email address");
        }
        return new TenantContacts.EmailContact(audiences, email);
    }).toList();
    return new TenantContacts(contacts);
}

// Lists notifications, filtered by the request's query properties, for one tenant or for all
// tenants that have notifications.
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
    boolean productionOnly = showOnlyProductionInstances(request);
    boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
    Slime slime = new Slime();
    Cursor notificationsArray = slime.setObject().setArray("notifications");
    tenant.map(t -> Stream.of(TenantName.from(t)))
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
            .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
            // Each filter is applied only when the corresponding query property is present.
            .filter(notification ->
                    propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
                    propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
                    propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
                    propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
                    propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
                    propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
            .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
    return new SlimeJsonResponse(slime);
}

// True when the request property is absent, or when it maps to a value equal to the given one.
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
    return Optional.ofNullable(request.getProperty(property))
            .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get()))
            .orElse(true);
}

// Serializes a single notification; the tenant field and the messages array are optional.
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
    cursor.setLong("at", notification.at().toEpochMilli());
    cursor.setString("level", notificationLevelAsString(notification.level()));
    cursor.setString("type", notificationTypeAsString(notification.type()));
    if (!excludeMessages) {
        Cursor messagesArray = cursor.setArray("messages");
        notification.messages().forEach(messagesArray::addString);
    }
    if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
    notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
    notification.source().zoneId().ifPresent(zoneId -> {
        cursor.setString("environment", zoneId.environment().value());
        cursor.setString("region", zoneId.region().value());
    });
    notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
    notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
    notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
}

// Wire name for a notification type; "submission" and "applicationPackage" share one name.
private static String notificationTypeAsString(Notification.Type type) {
    return switch (type) {
        case submission, applicationPackage: yield "applicationPackage";
        case testPackage: yield "testPackage";
        case deployment: yield "deployment";
        case feedBlock: yield "feedBlock";
        case reindex: yield "reindex";
    };
}

// Wire name for a notification level.
private static String notificationLevelAsString(Notification.Level level) {
    return switch (level) {
        case info: yield "info";
        case warning: yield "warning";
        case error: yield "error";
    };
}

// Lists one named application, or all applications of a tenant, with links to their instances.
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    getTenantOrThrow(tenantName); // validates that the tenant exists (throws otherwise)
    List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
            controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                    .map(List::of)
                    .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
    Slime slime = new Slime();
    Cursor applicationArray = slime.setArray();
    for (Application application : applications) {
        Cursor applicationObject = applicationArray.addObject();
        applicationObject.setString("tenant", application.id().tenant().value());
        applicationObject.setString("application", application.id().application().value());
        applicationObject.setString("url", withPath("/application/v4" +
                                                    "/tenant/" + application.id().tenant().value() +
                                                    "/application/" + application.id().application().value(),
                                                    request.getUri()).toString());
        Cursor instanceArray = applicationObject.setArray("instances");
        // Optionally restrict the instance list to production instances only.
        for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                          : application.instances().keySet()) {
            Cursor instanceObject = instanceArray.addObject();
            instanceObject.setString("instance", instance.value());
            instanceObject.setString("url", withPath("/application/v4" +
                                                     "/tenant/" + application.id().tenant().value() +
                                                     "/application/" + application.id().application().value() +
                                                     "/instance/" + instance.value(),
                                                     request.getUri()).toString());
        }
    }
    return new SlimeJsonResponse(slime);
}

// Returns, as a zip, the application package targeted by the last run of the given dev job.
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
    ZoneId zone = type.zone();
    RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision();
    byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
    return new ZipResponse(id.toFullString() + "."
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
            controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
    String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
    return new ZipResponse(filename, applicationPackage);
}

// Returns the stored diff for the given build of an application package, or 404.
private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
    TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
    return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number))
            .map(ByteArrayResponse::new)
            .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
}

// Serializes a single application.
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
    Slime slime = new Slime();
    toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
    return new SlimeJsonResponse(slime);
}

// Computes the compile version for an application, optionally restricted to a major version.
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
    Slime slime = new Slime();
    OptionalInt allowMajor = OptionalInt.empty();
    if (allowMajorParam != null) {
        try {
            allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
        }
    }
    Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
    slime.setObject().setString("compileVersion", compileVersion.toFullString());
    return new SlimeJsonResponse(slime);
}

// Serializes a single instance together with its application's deployment status.
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Slime slime = new
Slime();
    toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName),
            controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request);
    return new SlimeJsonResponse(slime);
}

// Registers a developer public key (PEM) for the requesting user on a cloud tenant,
// and echoes the resulting key set.
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    Principal user = request.getJDiscRequest().getUserPrincipal();
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withDeveloperKey(developerKey, user);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}

// Validates a named tenant secret store against a concrete deployment via the config server.
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
    var awsRegion = request.getProperty("aws-region");
    var parameterName = request.getProperty("parameter-name");
    var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
    var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
    var deploymentId = new DeploymentId(applicationId, zoneId);
    var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
    var tenantSecretStore = tenant.tenantSecretStores()
            .stream()
            .filter(secretStore -> secretStore.getName().equals(secretStoreName))
            .findFirst();
    if (tenantSecretStore.isEmpty())
        return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
    var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion,
parameterName);
    try {
        var responseRoot = new Slime();
        var responseCursor = responseRoot.setObject();
        responseCursor.setString("target", deploymentId.toString());
        var responseResultCursor = responseCursor.setObject("result");
        var responseSlime = SlimeUtils.jsonToSlime(response);
        SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
        return new SlimeJsonResponse(responseRoot);
    } catch (JsonParseException e) {
        // The config server's reply was not valid JSON; surface it verbatim as a 500.
        return ErrorResponse.internalServerError(response);
    }
}

// Removes a developer public key (PEM) from a cloud tenant and echoes the remaining key set.
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
    // NOTE(review): 'user' is never read below — candidate for removal (the require(...)
    // call also runs again inside lockOrThrow).
    Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
    Slime root = new Slime();
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
        tenant = tenant.withoutDeveloperKey(developerKey);
        toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
        controller.tenants().store(tenant);
    });
    return new SlimeJsonResponse(root);
}

// Serializes a key-to-principal map as [{key, user}, ...].
private void toSlime(Cursor keysArray, Map<PublicKey, ?
extends Principal> keys) {
    keys.forEach((key, principal) -> {
        Cursor keyObject = keysArray.addObject();
        keyObject.setString("key", KeyUtils.toPem(key));
        keyObject.setString("user", principal.getName());
    });
}

// Adds a deploy key (PEM) to the application and echoes the resulting key set.
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}

// Removes a deploy key (PEM) from the application and echoes the remaining key set.
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
    String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
    PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
    Slime root = new Slime();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
        application = application.withoutDeployKey(deployKey);
        application.get().deployKeys().stream()
                   .map(KeyUtils::toPem)
                   .forEach(root.setObject().setArray("keys")::addString);
        controller.applications().store(application);
    });
    return new SlimeJsonResponse(root);
}

// Configures a new AWS secret store for a cloud tenant and echoes the resulting store list.
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var awsId = mandatory("awsId", data).asString();
    var externalId = mandatory("externalId", data).asString();
    var role = mandatory("role",
data).asString();
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var tenantSecretStore = new TenantSecretStore(name, awsId, role);
    if (!tenantSecretStore.isValid()) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
    }
    if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
        return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
    }
    controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
    controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read so the response reflects the state that was actually stored.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}

// Deletes a named secret store from a cloud tenant and echoes the remaining stores.
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
    var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var optionalSecretStore = tenant.tenantSecretStores().stream()
            .filter(secretStore -> secretStore.getName().equals(name))
            .findFirst();
    if (optionalSecretStore.isEmpty())
        return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");
    var tenantSecretStore = optionalSecretStore.get();
    controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
    controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant =
lockedTenant.withoutSecretStore(tenantSecretStore);
        controller.tenants().store(lockedTenant);
    });
    // Re-read so the response reflects the state that was actually stored.
    tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
    var slime = new Slime();
    toSlime(slime.setObject(), tenant.tenantSecretStores());
    return new SlimeJsonResponse(slime);
}

// Sets the AWS role that may access this tenant's archived data.
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    var data = toSlime(request.getData()).get();
    var role = mandatory("role", data).asString();
    if (role.isBlank()) {
        return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
    }
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
}

// Clears the AWS archive access role for the tenant.
private HttpResponse removeAwsArchiveAccess(String tenantName) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
    controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
        var access = lockedTenant.get().archiveAccess();
        lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
}

// Sets the GCP member that may access this tenant's archived data.
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
    if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
        throw new IllegalArgumentException("Tenant '" +
tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var member = mandatory("member", data).asString(); if (member.isBlank()) { return ErrorResponse.badRequest("GCP archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member)); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + "."); } private HttpResponse removeGcpArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember()); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member removed for tenant " + tenantName + "."); } private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes."); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { Inspector majorVersionField = requestObject.field("majorVersion"); if (majorVersionField.valid()) { Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); application = application.withMajorVersion(majorVersion); messageBuilder.add("Set major version to " + (majorVersion == null ? 
"empty" : majorVersion)); // NOTE(review): continuation of patchApplication(...) started above this view span
            }
            Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
            if (pemDeployKeyField.valid()) {
                String pemDeployKey = pemDeployKeyField.asString();
                PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
                application = application.withDeployKey(deployKey);
                messageBuilder.add("Added deploy key " + pemDeployKey);
            }
            controller.applications().store(application);
        });
        return new MessageResponse(messageBuilder.toString());
    }

    // Looks up the application for tenant + name, or throws NotExistsException.
    private Application getApplication(String tenantName, String applicationName) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
        return controller.applications().getApplication(applicationId)
                .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    // Looks up the instance for tenant + application + instance, or throws NotExistsException.
    private Instance getInstance(String tenantName, String applicationName, String instanceName) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        return controller.applications().getInstance(applicationId)
                .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    // Lists the node-repository nodes allocated to this instance in the given zone,
    // serializing per-node state, versions, cluster membership and restart/reboot status.
    private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));
        Slime slime = new Slime();
        Cursor nodesArray = slime.setObject().setArray("nodes");
        for (Node node : nodes) {
            Cursor nodeObject = nodesArray.addObject();
            nodeObject.setString("hostname", node.hostname().value());
            nodeObject.setString("state", valueOf(node.state()));
            node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
            nodeObject.setString("orchestration", valueOf(node.serviceState()));
            nodeObject.setString("version", node.currentVersion().toString());
            node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
            toSlime(node.resources(), nodeObject);
            nodeObject.setString("clusterId", node.clusterId());
            nodeObject.setString("clusterType", valueOf(node.clusterType()));
            nodeObject.setBool("down", node.down());
            // "retired" covers both already-retired and want-to-retire nodes.
            nodeObject.setBool("retired", node.retired() || node.wantToRetire());
            // Restart/reboot are pending when the wanted generation is ahead of the current one.
            nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
            nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
            nodeObject.setString("group", node.group());
            nodeObject.setLong("index", node.index());
        }
        return new SlimeJsonResponse(slime);
    }

    // Serializes autoscaling state for each cluster of a deployment: min/max/current (and target
    // only when it differs from current), suggestions, utilization, scaling events and status.
    private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);
        Slime slime = new Slime();
        Cursor clustersObject = slime.setObject().setObject("clusters");
        for (Cluster cluster : application.clusters().values()) {
            Cursor clusterObject = clustersObject.setObject(cluster.id().value());
            clusterObject.setString("type", cluster.type().name());
            toSlime(cluster.min(), clusterObject.setObject("min"));
            toSlime(cluster.max(), clusterObject.setObject("max"));
            toSlime(cluster.current(), clusterObject.setObject("current"));
            // Only emit "target" when it is present and differs (numerically) from current resources.
            if (cluster.target().isPresent() && !
cluster.target().get().justNumbers().equals(cluster.current().justNumbers())) // NOTE(review): completes the condition split above
                toSlime(cluster.target().get(), clusterObject.setObject("target"));
            cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
            utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
            scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
            clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
            clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
            clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
            clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
            clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax());
        }
        return new SlimeJsonResponse(slime);
    }

    // Maps a node-repository node state to its wire name. Uses old-style labels with 'yield'
    // inside a switch expression; the default throws so new enum values fail loudly.
    private static String valueOf(Node.State state) {
        return switch (state) {
            case failed: yield "failed";
            case parked: yield "parked";
            case dirty: yield "dirty";
            case ready: yield "ready";
            case active: yield "active";
            case inactive: yield "inactive";
            case reserved: yield "reserved";
            case provisioned: yield "provisioned";
            case breakfixed: yield "breakfixed";
            case deprovisioned: yield "deprovisioned";
            default: throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
        };
    }

    // Maps orchestration state to its wire name; 'unknown' (and any unmatched value) maps to
    // "unknown" instead of failing.
    static String valueOf(Node.ServiceState state) {
        switch (state) {
            case expectedUp: return "expectedUp";
            case allowedDown: return "allowedDown";
            case permanentlyDown: return "permanentlyDown";
            case unorchestrated: return "unorchestrated";
            case unknown: break;
        }
        return "unknown";
    }

    // Maps cluster type to its wire name; 'unknown' is treated as a programming error here.
    private static String valueOf(Node.ClusterType type) {
        return switch (type) {
            case admin: yield "admin";
            case content: yield "content";
            case container: yield "container";
            case combined: yield "combined";
            case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
        };
    }

    // Maps disk speed to its wire name.
    private static String valueOf(NodeResources.DiskSpeed diskSpeed) {
        return switch (diskSpeed) {
            case fast : yield "fast";
            case slow : yield "slow";
            case any : yield "any";
        };
    }

    // Maps storage type to its wire name.
    private static String valueOf(NodeResources.StorageType storageType) {
        return switch (storageType) {
            case remote : yield "remote";
            case local : yield "local";
            case any : yield "any";
        };
    }

    // Streams Vespa logs for a deployment straight from the config server to the client.
    // The anonymous response closes the upstream stream via try-with-resources after transfer.
    private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                try (logStream) {
                    logStream.transferTo(outputStream);
                }
            }

            @Override
            public long maxPendingBytes() {
                return 1 << 26; // allow a large buffered backlog while streaming logs
            }
        };
    }

    // Returns the current support-access state for a deployment.
    private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
    }

    // Grants support access to a deployment for 7 days, attributed to the calling principal.
    private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        Principal principal = requireUserPrincipal(request);
        Instant now = controller.clock().instant();
        SupportAccess allowed =
controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName()); // NOTE(review): completes the assignment split above
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
    }

    // Revokes support access for a deployment and re-triggers (or queues) a deployment job
    // so the change is rolled out.
    private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        Principal principal = requireUserPrincipal(request);
        SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
        // NOTE(review): the message re-fetches the principal from the request instead of reusing
        // 'principal' validated above — consider using principal.getName() for consistency.
        controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
    }

    // Returns proton metrics for a deployment, fetched from the config server.
    private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
        return buildResponseFromProtonMetrics(protonMetrics);
    }

    // Returns scaling events per cluster within the ["from", "until"] epoch-second query window.
    // Defaults: from = epoch, until = now.
    private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        var from = Optional.ofNullable(request.getProperty("from"))
                .map(Long::valueOf)
                .map(Instant::ofEpochSecond)
                .orElse(Instant.EPOCH);
        var until = Optional.ofNullable(request.getProperty("until"))
                .map(Long::valueOf)
                .map(Instant::ofEpochSecond)
                .orElse(Instant.now(controller.clock()));
        var application = ApplicationId.from(tenantName, applicationName, instanceName);
        var zone = requireZone(environment, region);
        var deployment = new DeploymentId(application, zone);
        var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
        var slime = new Slime();
        var root = slime.setObject();
        for (var entry : events.entrySet()) {
            var serviceRoot = root.setArray(entry.getKey().clusterId().value());
            scalingEventsToSlime(entry.getValue(), serviceRoot);
        }
        return new SlimeJsonResponse(slime);
    }

    // Wraps proton metrics in a {"metrics": [...]} JSON response; returns 500 with an empty
    // body if serialization fails.
    private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
        try {
            var jsonObject = jsonMapper.createObjectNode();
            var jsonArray = jsonMapper.createArrayNode();
            for (ProtonMetrics metrics : protonMetrics) {
                jsonArray.add(metrics.toJson());
            }
            jsonObject.set("metrics", jsonArray);
            return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
        } catch (JsonProcessingException e) {
            log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
            return new JsonResponse(500, "");
        }
    }

    // (Re-)triggers a job for the given application. Request fields "skipTests", "skipRevision",
    // "skipUpgrade" and "reTrigger" tune what is triggered; test/staging job types are normalized
    // to the system's cloud-specific variants first.
    private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
        ZoneRegistry zones = controller.zoneRegistry();
        type = switch (type.environment()) {
            case test -> JobType.systemTest(zones, zones.systemZone().getCloudName());
            case staging -> JobType.stagingTest(zones, zones.systemZone().getCloudName());
            default -> type;
        };
        Inspector requestObject = toSlime(request.getData()).get();
        boolean requireTests = ! requestObject.field("skipTests").asBool();
        boolean reTrigger = requestObject.field("reTrigger").asBool();
        boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
        boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
        String triggered = reTrigger ?
                controller.applications().deploymentTrigger()
                        .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName() :
                controller.applications().deploymentTrigger()
                        .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                        .stream().map(job -> job.type().jobName()).collect(joining(", "));
        // Build a human-readable suffix describing which upgrades were suppressed, if any.
        String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                (upgradeRevision ? "" : "revision") +
                ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                (upgradePlatform ? "" : "platform") +
                ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
        return new MessageResponse(triggered.isEmpty() ?
                "Job " + type.jobName() + " for " + id + " not triggered" :
                "Triggered " + triggered + " for " + id + suppressedUpgrades);
    }

    // Pauses a job for the maximum allowed pause duration.
    private HttpResponse pause(ApplicationId id, JobType type) {
        Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
        controller.applications().deploymentTrigger().pauseJob(id, type, until);
        return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
    }

    // Resumes a previously paused job.
    private HttpResponse resume(ApplicationId id, JobType type) {
        controller.applications().deploymentTrigger().resumeJob(id, type);
        return new MessageResponse(type.jobName() + " for " + id + " resumed");
    }

    // Serializes an application overview: ids, deployments link, pending/outstanding change,
    // per-instance details, deploy keys, quality metrics, activity and issue ids.
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("tenant", application.id().tenant().value());
        object.setString("application", application.id().application().value());
        object.setString("deployments", withPath("/application/v4" +
                "/tenant/" + application.id().tenant().value() +
                "/application/" + application.id().application().value() +
                "/job/", request.getUri()).toString());
        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        application.revisions().last().ifPresent(version ->
JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version)); // NOTE(review): completes the lambda split above
        application.projectId().ifPresent(id -> object.setLong("projectId", id));
        // Change status is taken from the first instance only.
        application.instances().values().stream().findFirst().ifPresent(instance -> {
            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), application);
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        });
        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
        Cursor instancesArray = object.setArray("instances");
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);
        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }

    // Serializes one instance inside an application overview: change status, change blockers,
    // rotation id and per-deployment entries (recursing if requested).
    private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
        object.setString("instance", instance.name().value());
        if (deploymentSpec.instance(instance.name()).isPresent()) {
            // NOTE(review): 'jobStatus' appears unused in this block — candidate for removal.
            Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), status.application());
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());
            // Serialize the instance's change-blocker windows (versions/revisions, time zone, days, hours).
            Cursor changeBlockers = object.setArray("changeBlockers");
            deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
                Cursor changeBlockerObject = changeBlockers.addObject();
                changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
                changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
                changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
                Cursor days = changeBlockerObject.setArray("days");
                changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
                Cursor hours = changeBlockerObject.setArray("hours");
                changeBlocker.window().hours().forEach(hours::addLong);
            }));
        }
        addRotationId(object, instance);
        // Deployments are sorted by the spec's order when the instance is declared in it.
        List<Deployment> deployments = deploymentSpec.instance(instance.name())
                .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                .orElse(List.copyOf(instance.deployments().values()));
        Cursor deploymentsArray = object.setArray("deployments");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = deploymentsArray.addObject();
            if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty())
                toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
            if (recurseOverDeployments(request)) // List full deployment information when recursive.
                toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
            else {
                deploymentObject.setString("environment", deployment.zone().environment().value());
                deploymentObject.setString("region", deployment.zone().region().value());
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                        "/instance/" + instance.name().value() +
                        "/environment/" + deployment.zone().environment().value() +
                        "/region/" + deployment.zone().region().value(),
                        request.getUri()).toString());
            }
        }
    }

    // Adds the first assigned rotation id of the instance, if any.
    private void addRotationId(Cursor object, Instance instance) {
        instance.rotations().stream()
                .map(AssignedRotation::rotationId)
                .findFirst()
                .ifPresent(rotation -> object.setString("rotationId", rotation.asString()));
    }

    // Serializes a single instance view: ids, source info, change status, change blockers,
    // deployments (actual and pending), deploy keys, metrics, activity and issue ids.
    private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) {
        Application application = status.application();
        object.setString("tenant", instance.id().tenant().value());
        object.setString("application", instance.id().application().value());
        object.setString("instance", instance.id().instance().value());
        object.setString("deployments", withPath("/application/v4" +
                "/tenant/" + instance.id().tenant().value() +
                "/application/" + instance.id().application().value() +
                "/instance/" + instance.id().instance().value() + "/job/",
                request.getUri()).toString());
        application.revisions().last().ifPresent(version -> {
            version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url));
            version.commit().ifPresent(commit -> object.setString("commit", commit));
        });
        application.projectId().ifPresent(id -> object.setLong("projectId", id));
        if (application.deploymentSpec().instance(instance.name()).isPresent()) {
            // NOTE(review): 'jobStatus' appears unused in this block — candidate for removal.
            Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();
            if ( !
instance.change().isEmpty()) // NOTE(review): completes the condition split above
                toSlime(object.setObject("deploying"), instance.change(), application);
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
            // Serialize change-blocker windows for this instance.
            Cursor changeBlockers = object.setArray("changeBlockers");
            application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
                Cursor changeBlockerObject = changeBlockers.addObject();
                changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
                changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
                changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
                Cursor days = changeBlockerObject.setArray("days");
                changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
                Cursor hours = changeBlockerObject.setArray("hours");
                changeBlocker.window().hours().forEach(hours::addLong);
            }));
        }
        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
        addRotationId(object, instance);
        List<Deployment> deployments = application.deploymentSpec().instance(instance.name())
                .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                .orElse(List.copyOf(instance.deployments().values()));
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            if (deployment.zone().environment() == Environment.prod) {
                // With a single rotation the BCP status is inlined directly.
                if (instance.rotations().size() == 1) {
                    toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject);
                }
                if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) {
                    toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject);
                }
            }
            if (recurseOverDeployments(request)) // List full deployment information when recursive.
                toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request);
            else {
                deploymentObject.setString("environment", deployment.zone().environment().value());
                deploymentObject.setString("region", deployment.zone().region().value());
                deploymentObject.setString("instance", instance.id().instance().value());
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                        "/environment/" + deployment.zone().environment().value() +
                        "/region/" + deployment.zone().region().value(),
                        request.getUri()).toString());
            }
        }
        // Also list zones that are expected (production jobs) or active (manual deployments)
        // but have no current deployment yet.
        Stream.concat(status.jobSteps().keySet().stream()
                        .filter(job -> job.application().instance().equals(instance.name()))
                        .filter(job -> job.type().isProduction() && job.type().isDeployment()),
                controller.jobController().active(instance.id()).stream()
                        .map(run -> run.id().job())
                        .filter(job -> job.type().environment().isManuallyDeployed()))
                .map(job -> job.type().zone())
                .filter(zone -> ! instance.deployments().containsKey(zone))
                .forEach(zone -> {
                    Cursor deploymentObject = instancesArray.addObject();
                    deploymentObject.setString("environment", zone.environment().value());
                    deploymentObject.setString("region", zone.region().value());
                });
        // Both the legacy single "pemDeployKey" and the full "pemDeployKeys" array are emitted.
        application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key)));
        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }

    // Serializes a single deployment of an instance; 404 if the instance or deployment is absent.
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Instance instance = controller.applications().getInstance(id)
                .orElseThrow(() -> new NotExistsException(id + " not found"));
        DeploymentId deploymentId = new DeploymentId(instance.id(),
                requireZone(environment, region));
Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Change change, Application application) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision))); } private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); object.setBool("legacy", endpoint.legacy()); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())); boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints"); var endpointArray = response.setArray("endpoints"); EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId) .scope(Endpoint.Scope.zone); if (!legacyEndpoints) { zoneEndpoints = zoneEndpoints.not().legacy().direct(); } for (var 
endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); } EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId); if (!legacyEndpoints) { declaredEndpoints = declaredEndpoints.not().legacy().direct(); } for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); } response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString()); response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", application.revisions().get(deployment.revision()).stringId()); response.setLong("build", deployment.revision().number()); Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment); response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli())); application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); var instance = application.instances().get(deploymentId.applicationId().instance()); if (instance != null) { if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response); if (!deployment.zone().environment().isManuallyDeployed()) { DeploymentStatus status = 
controller.jobController().deploymentStatus(application); JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone())); Optional.ofNullable(status.jobSteps().get(jobId)) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision())); if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true)) response.setString("status", "pending"); else response.setString("status", "running"); }); } else { var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId())); deploymentRun.ifPresent(run -> { response.setString("status", run.hasEnded() ? "complete" : "running"); }); } } response.setDouble("quota", deployment.quota().rate()); deployment.cost().ifPresent(cost -> response.setDouble("cost", cost)); controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false) .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString())); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); 
metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(RotationState state, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", rotationStateString(state)); } private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) { var array = object.setArray("endpointStatus"); for (var rotation : rotations) { var statusObject = array.addObject(); var targets = status.of(rotation.rotationId()); statusObject.setString("endpointId", rotation.endpointId().id()); statusObject.setString("rotationId", rotation.rotationId().asString()); statusObject.setString("clusterId", rotation.clusterId().value()); statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment))); statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli()); } } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = requireZone(environment, region); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } DeploymentId deploymentId = new DeploymentId(instance.id(), zone); RoutingStatus.Agent agent = isOperator(request) ? 
// Record whether an operator or the tenant made the change, then apply the new routing status.
RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant; RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out; controller.routing().of(deploymentId).setRoutingStatus(status, agent); return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ? "in" : "out of")); }
// Returns the current global-rotation override status for a deployment's primary rotation endpoint,
// as a "globalrotationoverride" array of [upstream name, status object]. Empty array when no rotation.
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId()) .requiresRotation() .primary(); if (primaryEndpoint.isPresent()) { DeploymentRoutingContext context = controller.routing().of(deploymentId); RoutingStatus status = context.routingStatus(); array.addString(primaryEndpoint.get().upstreamName(deploymentId)); Cursor statusObject = array.addObject(); statusObject.setString("status", status.value().name()); statusObject.setString("reason", ""); statusObject.setString("agent", status.agent().name()); statusObject.setLong("timestamp", status.changedAt().getEpochSecond()); } return new SlimeJsonResponse(slime); }
// Returns rotation (BCP) status for the given deployment and (optionally named) endpoint.
// Throws NotExistsException when the instance has no deployment in the zone (continues on the next line).
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().requireInstance(applicationId); ZoneId zone = requireZone(environment, region); RotationId rotation = findRotationId(instance, endpointId); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + 
" has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(instance.rotationStatus().of(rotation, deployment), response); return new SlimeJsonResponse(slime); } private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); Slime slime = new Slime(); Cursor root = slime.setObject(); if ( ! instance.change().isEmpty()) { instance.change().platform().ifPresent(version -> root.setString("platform", version.toString())); instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString())); root.setBool("pinned", instance.change().isPinned()); } return new SlimeJsonResponse(slime); } private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); } private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters())); } private HttpResponse orchestrator(String tenantName, 
String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodes(deploymentId); }
// Proxies a service's /state/v1 API on a given host via the config server; adds the original
// request URL (query stripped) as "forwarded-url" so relative links can be rewritten downstream.
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Query query = Query.empty().add(request.getJDiscRequest().parameters()); query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString()); return controller.serviceRegistry().configServer().getServiceNodePage( deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query); }
// Fetches application-package content for a deployment from the config server.
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri()); }
// Updates an existing tenant from the request body; 404s first if the tenant does not exist.
private HttpResponse updateTenant(String tenantName, HttpRequest request) { getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); }
// Creates a new tenant (body continues on the next line).
private HttpResponse createTenant(String tenantName, HttpRequest 
request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
// In public systems, seed the new cloud tenant's contact info from the authenticated user.
if (controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), user.email())); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); }
// Creates an application under a tenant, using credentials taken from the request body.
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); }
// Creates an instance, implicitly creating the application first when it does not exist yet.
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); if (controller.applications().getApplication(applicationId).isEmpty()) createApplication(tenantName, applicationName, request); controller.applications().createInstance(applicationId.instance(instanceName)); Slime slime = new Slime(); toSlime(applicationId.instance(instanceName), slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the given Vespa version if a valid 
one is given, e.g., "7.8.9". */ private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) { String versionString = readToString(request.getData()); ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Version version = Version.fromString(versionString); VersionStatus versionStatus = controller.readVersionStatus(); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(versionStatus); if (!versionStatus.isActive(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + versionStatus.versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPin(); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. */ private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Inspector buildField = toSlime(request.getData()).get().field("build"); long build = buildField.valid() ? buildField.asLong() : -1; StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { RevisionId revision = build == -1 ? 
application.get().revisions().last().get().id() : getRevision(application.get(), build); Change change = Change.of(revision); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); }
// Looks up the revision for a given build number; also requires the build artifact to still
// exist in the application store. Throws IllegalArgumentException when not found.
private RevisionId getRevision(Application application, long build) { return application.revisions().withPackage().stream() .map(ApplicationVersion::id) .filter(version -> version.number() == build) .findFirst() .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build)) .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found")); }
// Marks a production build as skipped so it will not be deployed.
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); RevisionId revision = RevisionId.forProduction(Long.parseLong(build)); controller.applications().lockApplicationOrThrow(id, application -> { controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped()))); }); return new MessageResponse("Marked build '" + build + "' as non-deployable"); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
// "choice" is parsed case-insensitively into a ChangesToCancel value (continues on the next line).
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase()); 
controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); }); return new MessageResponse(response.toString()); } /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
// "clusterId" and "documentType" are optional comma-separated request properties; blanks are
// dropped. "speed" is an optional double; "indexedOnly" restricts to indexed document types.
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .toList(); List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! type.isBlank()) .toList(); Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null; boolean indexedOnly = request.getBooleanProperty("indexedOnly"); controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
// Echo back exactly what was requested, including only the filters that were actually given.
return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) + (indexedOnly ? ", for indexed types" : "") + (speed != null ? ", with speed " + speed : "")); } /** Gets reindexing status of an application in a zone. 
*/ private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setBool("enabled", reindexing.enabled());
// Per-cluster status, sorted by cluster name: "pending" lists document types awaiting a config
// generation; "ready" lists types with their current reindexing status.
Cursor clustersArray = root.setArray("clusters"); reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = readyArray.addObject(); readyObject.setString("type", ready.getKey()); setStatus(readyObject, ready.getValue()); }); }); return new SlimeJsonResponse(slime); }
// Serializes one reindexing status entry; every field is optional and only written when present
// (continues on the next line with "progress" and "speed").
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) { status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli())); status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli())); status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli())); status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state)); status.message().ifPresent(message -> statusObject.setString("message", message)); status.progress().ifPresent(progress 
-> statusObject.setDouble("progress", progress)); status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed)); } private static String toString(ApplicationReindexing.State state) { return switch (state) { case PENDING: yield "pending"; case RUNNING: yield "running"; case FAILED: yield "failed"; case SUCCESSFUL: yield "successful"; }; } /** Enables reindexing of an application in a zone. */ private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().enableReindexing(id, zone); return new MessageResponse("Enabled reindexing of " + id + " in " + zone); } /** Disables reindexing of an application in a zone. */ private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().disableReindexing(id, zone); return new MessageResponse("Disabled reindexing of " + id + " in " + zone); } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); RestartFilter restartFilter = new RestartFilter() .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of)) .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from)) 
.withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); controller.applications().restart(deploymentId, restartFilter); return new MessageResponse("Requested restart of " + deploymentId); } /** Set suspension status of the given deployment. */ private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().setSuspension(deploymentId, suspend); return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId); }
// Deploys an application package directly through the job controller. Direct deployments are
// restricted to manually deployed environments unless the caller is an operator
// (multipart validation continues on the next line).
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) { if ( ! type.environment().isManuallyDeployed() && ! isOperator(request)) throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments."); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), Optional.of(id.instance()), Optional.of(type.zone()), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); ensureApplicationExists(TenantAndApplicationId.from(id), request); boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("dryRun", options)) .map(Boolean::valueOf) .orElse(false); controller.jobController().deploy(id, type, version, applicationPackage, dryRun); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time."); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } ActivateResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); return new SlimeJsonResponse(toSlime(result)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && !isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id 
= TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); }
// Deletes an instance; when it was the last instance, the application itself is deleted too.
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) { Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); } return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); }
// Deactivates a deployment and aborts any still-running deployment job for that zone,
// attributing the abort to the requesting user.
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().deactivate(id.applicationId(), id.zoneId()); controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId())) .filter(run -> ! run.hasEnded()) .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName())); return new MessageResponse("Deactivated " + id); } /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. 
*/ private HttpResponse testConfig(ApplicationId id, JobType type) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
// Fall back to the default instance's production deployments when the requested instance
// is not declared in the deployment spec.
ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent() ? id : TenantAndApplicationId.from(id).defaultInstance(); HashSet<DeploymentId> deployments = controller.applications() .getInstance(prodInstanceId).stream() .flatMap(instance -> instance.productionDeployments().keySet().stream()) .map(zone -> new DeploymentId(prodInstanceId, zone)) .collect(Collectors.toCollection(HashSet::new)); ApplicationId toTest = type.isProduction() ? prodInstanceId : id; if ( ! type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone())); Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone()); if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone()); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, deployment.version(), deployment.revision(), deployment.at(), controller.routing().readTestRunnerEndpointsOf(deployments), controller.applications().reachableContentClustersByZone(deployments))); }
// Requests a service dump on a node. Rejects when a dump is already in progress (neither
// failedAt nor completedAt set) unless "force" is given (payload parsing continues on the next line).
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null); if (report != null) { Cursor cursor = report.get(); boolean force = request.getBooleanProperty("force"); if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) { throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString()); } } Slime requestPayload; try { 
requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes()); } catch (Exception e) { throw new IllegalArgumentException("Missing or invalid JSON in request content", e); } Cursor requestPayloadCursor = requestPayload.get(); String configId = requestPayloadCursor.field("configId").asString(); long expiresAt = requestPayloadCursor.field("expiresAt").asLong(); if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); int artifactEntries = artifactsCursor.entries(); if (artifactEntries == 0) { throw new IllegalArgumentException("Missing or empty 'artifacts'"); }
// Build the dump request document: creation time, config id, artifact list, optional expiry
// and optional pass-through "dumpOptions" object.
Slime dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); }
// Store the request as the node's "serviceDump" report; with "wait" the call blocks until the dump finishes.
var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); }
// Returns the stored service-dump report for a node (continues on the next line).
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId 
zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname) .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)); return new SlimeJsonResponse(report); }
// Polls the node's service-dump report every 2 seconds until completedAt or failedAt is set.
// NOTE(review): the loop is unbounded and calls Optional.get() on each poll — presumably an
// outer request timeout bounds this; confirm, and consider a max-wait and orElseThrow here.
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { int pollInterval = 2; Slime report; while (true) { report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get(); Cursor cursor = report.get(); if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) { break; } final Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); }
// Reads the "serviceDump" report of a node, verifying the node exists and is owned by the
// given application. Empty when the node has no such report.
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); }
// Parses a source revision from JSON; all three fields are mandatory (continues on the next line).
private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } 
return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); }
// Looks up a tenant by name, translating absence into a 404 NotExistsException.
private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); }
// Serializes a tenant with type-specific detail (athenz vs cloud), its applications/instances,
// and tenant metadata (continues on the next line).
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz:
// Athenz tenants: domain, property, and optional contact details.
AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: {
// Cloud tenants: creator, registered developer keys (PEM), secret stores, AWS integration and quota.
CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); 
toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role)); toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, 
                                                   controller.serviceRegistry().zoneRegistry().system());
        object.setDouble("cost", cost);
    }

    /** Renders current/ideal/peak utilization for cpu, memory and disk. */
    private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
        utilizationObject.setDouble("cpu", utilization.cpu());
        utilizationObject.setDouble("idealCpu", utilization.idealCpu());
        utilizationObject.setDouble("currentCpu", utilization.currentCpu());
        utilizationObject.setDouble("peakCpu", utilization.peakCpu());
        utilizationObject.setDouble("memory", utilization.memory());
        utilizationObject.setDouble("idealMemory", utilization.idealMemory());
        utilizationObject.setDouble("currentMemory", utilization.currentMemory());
        utilizationObject.setDouble("peakMemory", utilization.peakMemory());
        utilizationObject.setDouble("disk", utilization.disk());
        utilizationObject.setDouble("idealDisk", utilization.idealDisk());
        utilizationObject.setDouble("currentDisk", utilization.currentDisk());
        utilizationObject.setDouble("peakDisk", utilization.peakDisk());
    }

    /** Renders each scaling event with from/to resources, start time and optional completion time. */
    private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
        for (Cluster.ScalingEvent scalingEvent : scalingEvents) {
            Cursor scalingEventObject = scalingEventsArray.addObject();
            toSlime(scalingEvent.from(), scalingEventObject.setObject("from"));
            toSlime(scalingEvent.to(), scalingEventObject.setObject("to"));
            scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli());
            scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli()));
        }
    }

    private void toSlime(NodeResources resources, Cursor object) {
        object.setDouble("vcpu", resources.vcpu());
        object.setDouble("memoryGb", resources.memoryGb());
        object.setDouble("diskGb", resources.diskGb());
        object.setDouble("bandwidthGbps", resources.bandwidthGbps());
        object.setString("diskSpeed", valueOf(resources.diskSpeed()));
        object.setString("storageType", valueOf(resources.storageType()));
    }

    /** Renders a single entry in the tenant list: name, type metadata and a self-link. */
    private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
        object.setString("tenant", tenant.name().value());
        Cursor metaData = object.setObject("metaData");
        metaData.setString("type", tenantType(tenant));
        switch (tenant.type()) {
            case athenz:
                AthenzTenant athenzTenant = (AthenzTenant) tenant;
                metaData.setString("athensDomain", athenzTenant.domain().getName());
                metaData.setString("property", athenzTenant.property().id());
                break;
            case cloud:
                break;
            case deleted:
                break;
            default:
                throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
        }
        object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
    }

    /**
     * Renders tenant metadata timestamps: creation, deletion (for deleted tenants), last dev
     * deployment, last submission, and last logins per user level.
     */
    private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
        // Last dev activity: prefer actual dev deployments; fall back to the latest dev job run
        Optional<Instant> lastDev = applications.stream()
                .flatMap(application -> application.instances().values().stream())
                .flatMap(instance -> instance.deployments().values().stream()
                        .filter(deployment -> deployment.zone().environment() == Environment.dev)
                        .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
                .max(Comparator.naturalOrder())
                .or(() -> applications.stream()
                        .flatMap(application -> application.instances().values().stream())
                        .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
                                .filter(job -> job.environment() == Environment.dev)
                                .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                        .map(Run::start)
                        .max(Comparator.naturalOrder()));
        Optional<Instant> lastSubmission = applications.stream()
                .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
                .max(Comparator.naturalOrder());
        object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
        if (tenant.type() == Tenant.Type.deleted)
            object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
        lastDev.ifPresent(instant ->
                object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
        lastSubmission.ifPresent(instant ->
object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal 
                                     requireUserPrincipal(HttpRequest request) {
        Principal principal = request.getJDiscRequest().getUserPrincipal();
        if (principal == null) throw new RestApiException.InternalServerError("Expected a user principal");
        return principal;
    }

    /** Returns the given field, throwing IllegalArgumentException if it is missing. */
    private Inspector mandatory(String key, Inspector object) {
        if ( ! object.field(key).valid())
            throw new IllegalArgumentException("'" + key + "' is missing");
        return object.field(key);
    }

    private Optional<String> optional(String key, Inspector object) {
        return SlimeUtils.optionalString(object.field(key));
    }

    /** Joins the given path elements with '/'. */
    private static String path(Object... elements) {
        return Joiner.on("/").join(elements);
    }

    /** Renders an application id with a self-link. */
    private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) {
        object.setString("tenant", id.tenant().value());
        object.setString("application", id.application().value());
        object.setString("url", withPath("/application/v4" +
                                         "/tenant/" + id.tenant().value() +
                                         "/application/" + id.application().value(),
                                         request.getUri()).toString());
    }

    /** Renders an instance id with a self-link. */
    private void toSlime(ApplicationId id, Cursor object, HttpRequest request) {
        object.setString("tenant", id.tenant().value());
        object.setString("application", id.application().value());
        object.setString("instance", id.instance().value());
        object.setString("url", withPath("/application/v4" +
                                         "/tenant/" + id.tenant().value() +
                                         "/application/" + id.application().value() +
                                         "/instance/" + id.instance().value(),
                                         request.getUri()).toString());
    }

    /** Renders a deployment activation result: revision, package size, prepare log and config change actions. */
    private Slime toSlime(ActivateResult result) {
        Slime slime = new Slime();
        Cursor object = slime.setObject();
        object.setString("revisionId", result.revisionId().id());
        object.setLong("applicationZipSize", result.applicationZipSizeBytes());
        Cursor logArray = object.setArray("prepareMessages");
        if (result.prepareResponse().log != null) {
            for (Log logMessage : result.prepareResponse().log) {
                Cursor logObject = logArray.addObject();
                logObject.setLong("time", logMessage.time);
                logObject.setString("level", logMessage.level);
                logObject.setString("message", logMessage.message);
            }
        }
        Cursor changeObject = object.setObject("configChangeActions");
        Cursor restartActionsArray = changeObject.setArray("restart");
        for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
            Cursor restartActionObject = restartActionsArray.addObject();
            restartActionObject.setString("clusterName", restartAction.clusterName);
            restartActionObject.setString("clusterType", restartAction.clusterType);
            restartActionObject.setString("serviceType", restartAction.serviceType);
            serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
            stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
        }
        Cursor refeedActionsArray = changeObject.setArray("refeed");
        for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
            Cursor refeedActionObject = refeedActionsArray.addObject();
            refeedActionObject.setString("name", refeedAction.name);
            refeedActionObject.setString("documentType", refeedAction.documentType);
            refeedActionObject.setString("clusterName", refeedAction.clusterName);
            serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
            stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
        }
        return slime;
    }

    private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
        for (ServiceInfo serviceInfo : serviceInfoList) {
            Cursor serviceInfoObject = array.addObject();
            serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
            serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
            serviceInfoObject.setString("configId", serviceInfo.configId);
            serviceInfoObject.setString("hostName", serviceInfo.hostName);
        }
    }

    private void stringsToSlime(List<String> strings, Cursor array) {
        for (String string : strings)
            array.addString(string);
    }

    private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) {
        Cursor secretStore = object.setArray("secretStores");
        tenantSecretStores.forEach(store -> {
            toSlime(secretStore.addObject(), store);
        });
    }

    /** Renders the tenant's container role together with its secret-store accounts. */
    private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
        object.setString("tenantRole", tenantRoles.containerRole());
        var stores = object.setArray("accounts");
        tenantSecretStores.forEach(secretStore -> {
            toSlime(stores.addObject(), secretStore);
        });
    }

    private void toSlime(Cursor object, TenantSecretStore secretStore) {
        object.setString("name", secretStore.getName());
        object.setString("awsId", secretStore.getAwsId());
        object.setString("role", secretStore.getRole());
    }

    /** Reads the entire stream as a string, or null when the stream is empty. */
    private String readToString(InputStream stream) {
        Scanner scanner = new Scanner(stream).useDelimiter("\\A");
        if ( ! scanner.hasNext()) return null;
        return scanner.next();
    }

    private static boolean recurseOverTenants(HttpRequest request) {
        return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
    }

    private static boolean recurseOverApplications(HttpRequest request) {
        return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
    }

    private static boolean recurseOverDeployments(HttpRequest request) {
        return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
    }

    private static boolean showOnlyProductionInstances(HttpRequest request) {
        return "true".equals(request.getProperty("production"));
    }

    private static boolean showOnlyActiveInstances(HttpRequest request) {
        return "true".equals(request.getProperty("activeInstances"));
    }

    private static boolean includeDeleted(HttpRequest request) {
        return "true".equals(request.getProperty("includeDeleted"));
    }

    /** Maps a tenant type to its legacy upper-case API string. */
    private static String tenantType(Tenant tenant) {
        return switch (tenant.type()) {
            case athenz: yield "ATHENS";
            case cloud: yield "CLOUD";
            case deleted: yield "DELETED";
        };
    }

    private static ApplicationId appIdFromPath(Path path) {
        return ApplicationId.from(path.get("tenant"),
                                  path.get("application"),
                                  path.get("instance"));
    }

    private JobType jobTypeFromPath(Path path) {
        return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
    }

    private RunId runIdFromPath(Path path) {
        long number = Long.parseLong(path.get("number"));
        return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
    }

    /**
     * Handles submission of a new application revision: parses the multipart payload, validates
     * the submit options and source URL, verifies identity configuration, then registers the
     * submission with the job controller.
     */
    private HttpResponse submit(String tenant, String application, HttpRequest request) {
        Map<String, byte[]> dataParts = parseDataParts(request);
        Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
        long projectId = submitOptions.field("projectId").asLong();
        projectId = projectId == 0 ? 1 : projectId; // 0 means unset; default to 1
        Optional<String> repository = optional("repository", submitOptions);
        Optional<String> branch = optional("branch", submitOptions);
        Optional<String> commit = optional("commit", submitOptions);
        // A source revision is only recorded when all three parts are present
        Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                : Optional.empty();
        Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
        Optional<String> authorEmail = optional("authorEmail", submitOptions);
        Optional<String> description = optional("description", submitOptions);
        int risk = (int) submitOptions.field("risk").asLong();
        sourceUrl.map(URI::create).ifPresent(url -> {
            if (url.getHost() == null || url.getScheme() == null)
                throw new IllegalArgumentException("Source URL must include scheme and host");
        });
        ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
        byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
        Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
        controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                        Optional.empty(),
                                                                        Optional.empty(),
                                                                        applicationPackage,
                                                                        Optional.of(requireUserPrincipal(request)));
        TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
        ensureApplicationExists(id, request);
        return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
    }

    /** Submits a deployment-removal package, which removes all production deployments. */
    private HttpResponse removeAllProdDeployments(String tenant, String application) {
        JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                     TenantAndApplicationId.from(tenant, application),
                                                     new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                                                    Optional.empty(), Optional.empty(), Optional.empty(), 0),
                                                     0);
        return new MessageResponse("All deployments removed");
    }

    private ZoneId requireZone(String environment, String region) {
        return requireZone(ZoneId.from(environment, region));
    }

    /** Returns the given zone if it exists in this system (or is the special prod controller zone). */
    private ZoneId requireZone(ZoneId zone) {
        // NOTE: the "controller" prod zone is a pseudo zone not present in the zone registry
        if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
            return zone;
        }
        if (!controller.zoneRegistry().hasZone(zone)) {
            throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
        }
        return zone;
    }

    /**
     * Parses the request's multipart data, verifying the X-Content-Hash header (base64 of the
     * SHA-256 of the body) when present.
     */
    private static Map<String, byte[]> parseDataParts(HttpRequest request) {
        String contentHash = request.getHeader("X-Content-Hash");
        if (contentHash == null)
            return new MultipartParser().parse(request);
        DigestInputStream digester = Signatures.sha256Digester(request.getData());
        var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
        if ( !
             Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
            throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
        return dataParts;
    }

    /**
     * Finds the rotation for the given instance, selected by endpoint id when given; otherwise
     * the instance must have exactly one rotation.
     */
    private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
        if (instance.rotations().isEmpty()) {
            throw new NotExistsException("global rotation does not exist for " + instance);
        }
        if (endpointId.isPresent()) {
            return instance.rotations().stream()
                           .filter(r -> r.endpointId().id().equals(endpointId.get()))
                           .map(AssignedRotation::rotationId)
                           .findFirst()
                           .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                                     " does not exist for " + instance));
        } else if (instance.rotations().size() > 1) {
            throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
        }
        return instance.rotations().get(0).rotationId();
    }

    private static String rotationStateString(RotationState state) {
        return switch (state) {
            case in: yield "IN";
            case out: yield "OUT";
            case unknown: yield "UNKNOWN";
        };
    }

    private static String endpointScopeString(Endpoint.Scope scope) {
        return switch (scope) {
            case weighted: yield "weighted";
            case application: yield "application";
            case global: yield "global";
            case zone: yield "zone";
        };
    }

    private static String routingMethodString(RoutingMethod method) {
        return switch (method) {
            case exclusive: yield "exclusive";
            case sharedLayer4: yield "sharedLayer4";
        };
    }

    /** Returns the request-context attribute with the given name, cast to the given type. */
    private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
        return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
                       .filter(cls::isInstance)
                       .map(cls::cast)
                       .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
    }

    /** Returns whether given request is by an operator */
    private static boolean isOperator(HttpRequest request) {
        var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
        return securityContext.roles().stream()
                              .map(Role::definition)
                              .anyMatch(definition -> definition == RoleDefinition.hostedOperator);
    }

    /** Creates the application if it does not already exist (applications are implicit in public). */
    private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
        if (controller.applications().getApplication(id).isEmpty()) {
            log.fine("Application does not exist in public, creating: " + id);
            var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
            controller.applications().createApplication(id, credentials);
        }
    }

    /** Returns the deployments sorted by the declared production zone order in the deployment spec. */
    private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
        List<ZoneId> productionZones = spec.zones().stream()
                                           .filter(z -> z.region().isPresent())
                                           .map(z -> ZoneId.from(z.environment(), z.region().get()))
                                           .toList();
        return deployments.stream()
                          .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
                          .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
    }

}
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx, controller.auditLogger()); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if 
(path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if 
            (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        // Instance-level job and run inspection endpoints
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
        // Deployment (zone) level endpoints, instance-first path layout
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if
            (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Legacy path layout with environment/region before instance
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // NOTE(review): the following check is an exact duplicate of the one above; this branch is unreachable.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), 
// handlePUT (tail): global-rotation override; then handlePOST, which covers
// tenant/application/instance creation, key management, deployment triggering
// (platform/pin/application), job control, submit, and per-deployment actions
// (deploy, reindex, restart, suspend, support access, service dumps).
path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) 
// Application-level routes without an explicit instance use the "default"
// instance name; instance-scoped routes below extract it from the path.
return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), 
// handlePATCH routes both application- and instance-level PATCH to
// patchApplication (the instance segment is ignored there). handleDELETE
// begins below: tenant deletion, managed access, keys, archive access.
path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant")); if 
// handleDELETE (continued): archive access, secret stores, application and
// instance deletion, deployment cancellation, build cancellation, job abort /
// resume, deployment deactivation, reindexing disable, resume (un-suspend),
// rotation override removal, and support-access revocation.
(path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), 
path.get("application"), path.get("instance"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
// handleOPTIONS advertises the supported HTTP methods via the Allow header.
// recursiveRoot serializes every (optionally deleted-included) tenant with its
// applications; root picks recursive vs. plain resource listing based on the
// request's recursion property.
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyResponse response = new EmptyResponse(); response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); List<Application> applications = controller.applications().asList(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).collect(toList()), request); return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return recurseOverTenants(request) ? 
// root (tail); then tenants/tenant listing and the cloud-tenant access-request
// flow: accessRequests reports managed-access state, any pending membership
// request, and the audit log — all restricted to cloud tenants.
recursiveRoot(request) : new ResourceResponse(request, "tenant"); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request)) .map(tenant -> tenant(tenant, request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request); return new SlimeJsonResponse(slime); } private HttpResponse accessRequests(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var accessControlService = controller.serviceRegistry().accessControlService(); var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant); var managedAccess = accessControlService.getManagedAccess(tenant); var slime = new Slime(); var cursor = slime.setObject(); cursor.setBool("managedAccess", managedAccess); accessRoleInformation.getPendingRequest() .ifPresent(membershipRequest -> { var requestCursor = cursor.setObject("pendingRequest"); requestCursor.setString("requestTime", membershipRequest.getCreationTime()); requestCursor.setString("reason", membershipRequest.getReason()); }); var auditLogCursor = cursor.setArray("auditLog"); accessRoleInformation.getAuditLog() .forEach(auditLogEntry -> { var entryCursor = auditLogCursor.addObject(); entryCursor.setString("created", 
// requestSshAccess is operator-only; approveAccessRequest reads an optional
// "expiry" (epoch millis) from the request body, defaulting to now + 1 day.
auditLogEntry.getCreationTime()); entryCursor.setString("approver", auditLogEntry.getApprover()); entryCursor.setString("reason", auditLogEntry.getReason()); entryCursor.setString("status", auditLogEntry.getAction()); }); return new SlimeJsonResponse(slime); } private HttpResponse requestSshAccess(String tenantName, HttpRequest request) { if (!isOperator(request)) { return ErrorResponse.forbidden("Only operators are allowed to request ssh access"); } if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only request access for cloud tenants"); controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName)); return new MessageResponse("OK"); } private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var inspector = toSlime(request.getData()).get(); var expiry = inspector.field("expiry").valid() ? 
// NOTE(review): in setManagedAccess below, the error message "Can only set
// access privel for cloud tenants" looks like a typo for "privileges" — it is
// a user-facing runtime string, so fix it deliberately (and update any tests
// that match on it).
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); } private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); } private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); } private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privel for cloud tenants"); controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { 
// Tenant-info serialization (legacy flat form and the newer profile/billing/
// contacts split) and the corresponding merge-and-store PUT handlers. Each
// put* handler merges incoming fields over existing info (missing fields keep
// their old value), validates, then stores under the tenant lock.
infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(getString(inspector.field("contact").field("email"), info.contact().email())); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("name"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) 
.withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() .withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() 
// validateMergedTenantInfo enforces: non-blank contact name, a contact email
// containing '@', and (when present) a website parseable as a URL.
// The toSlime(TenantAddress/TenantBilling/TenantContacts) helpers skip empty
// values rather than emitting empty objects.
.withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } } private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); } private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); } private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); 
contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); } private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { return field.valid() ? 
field.asString().trim() : defaultVale; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email())); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new 
IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) { if (!insp.valid()) return oldContact; String email = getString(insp.field("email"), oldContact.email()); if (!email.isBlank() && !email.contains("@")) { throw new IllegalArgumentException("'email' needs to be an email address"); } return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(getString(insp.field("email"), oldContact.email())) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, oldContact.contact())) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); } private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); if (!email.contains("@")) { throw new IllegalArgumentException("'email' needs to be an email address"); } return new TenantContacts.EmailContact(audiences, email); }).toList(); return new TenantContacts(contacts); } private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) 
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) // completes the tenant-fan-out started on the previous chunk line
              .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
              // Each optional query parameter narrows the result; absent parameters match everything.
              .filter(notification ->
                      propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
                      propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
                      propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
                      propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
                      propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
                      propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
              .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
        return new SlimeJsonResponse(slime);
    }

    /**
     * Returns true when the named request property is absent, or when it is present,
     * {@code value} is present, and the mapped property equals {@code value}.
     */
    private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
        return Optional.ofNullable(request.getProperty(property))
                       .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get()))
                       .orElse(true);
    }

    /** Serializes one notification to {@code cursor}; message bodies and the tenant field are optional. */
    private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
        cursor.setLong("at", notification.at().toEpochMilli());
        cursor.setString("level", notificationLevelAsString(notification.level()));
        cursor.setString("type", notificationTypeAsString(notification.type()));
        if (!excludeMessages) {
            Cursor messagesArray = cursor.setArray("messages");
            notification.messages().forEach(messagesArray::addString);
        }
        if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
        // The remaining fields are emitted only when present on the notification source.
        notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
        notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
        notification.source().zoneId().ifPresent(zoneId -> {
            cursor.setString("environment", zoneId.environment().value());
            cursor.setString("region", zoneId.region().value());
        });
        notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
        notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
        notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
    }

    /** Maps a notification type to its wire name; note submission and applicationPackage share one name. */
    private static String notificationTypeAsString(Notification.Type type) {
        return switch (type) {
            case submission, applicationPackage: yield "applicationPackage";
            case testPackage: yield "testPackage";
            case deployment: yield "deployment";
            case feedBlock: yield "feedBlock";
            case reindex: yield "reindex";
        };
    }

    /** Maps a notification level to its wire name. */
    private static String notificationLevelAsString(Notification.Level level) {
        return switch (level) {
            case info: yield "info";
            case warning: yield "warning";
            case error: yield "error";
        };
    }

    /**
     * Lists one named application, or all applications of the tenant when no name is given.
     * (The ternary choosing between the two completes on the next chunk line.)
     */
    private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        getTenantOrThrow(tenantName); // fail fast with NotExists semantics before listing
        List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) : // completes the applications(...) ternary started on the previous chunk line
                controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                          .map(List::of)
                          .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
        Slime slime = new Slime();
        Cursor applicationArray = slime.setArray();
        for (Application application : applications) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            // Self-referencing URL for the application resource, built from the request's own URI.
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
        return new SlimeJsonResponse(slime);
    }

    /**
     * Returns the application package last deployed by the given dev job, as a zip.
     * NOTE(review): last(id, type).get() throws NoSuchElementException if the job has never run —
     * presumably guarded by routing/earlier validation; verify against callers.
     */
    private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
        ZoneId zone = type.zone();
        RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision();
        byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
        return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
    }

    /** Returns the stored diff for a dev deployment run, or 404 when no diff exists for that run. */
    private HttpResponse devApplicationPackageDiff(RunId runId) {
        DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
        return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number())
                         .map(ByteArrayResponse::new)
                         .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
    }

    /**
     * Returns an application package (or its tester package, with ?tests=true) as a zip.
     * The build is selected by the "build" request property: a number, the literal
     * "latestDeployed", or — when absent — the latest submitted build.
     *
     * @throws NotExistsException when no matching package has been submitted/deployed
     * @throws IllegalArgumentException when "build" is neither a number nor "latestDeployed"... (number parse only)
     */
    private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
        TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
        final long build;
        String requestedBuild = request.getProperty("build");
        if (requestedBuild != null) {
            if (requestedBuild.equals("latestDeployed")) {
                build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
                                  .map(RevisionId::number)
                                  .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
            } else {
                try {
                    // Re-reads the property; same non-null value as requestedBuild.
                    build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L);
                } catch (NumberFormatException e) {
                    throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
                }
            }
        } else {
            // No build given: default to the latest submitted revision.
            build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                              .map(version -> version.id().number())
                              .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
        }
        RevisionId revision = RevisionId.forProduction(build);
        boolean tests = request.getBooleanProperty("tests");
        byte[] applicationPackage = tests ?
                controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
                controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
        String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
        return new ZipResponse(filename, applicationPackage);
    }

    /** Returns the stored submission diff for the given build number, or 404 when none exists. */
    private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
        TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
        return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number))
                         .map(ByteArrayResponse::new)
                         .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
    }

    /** Serializes one application (looked up or 404) to the response. */
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
        return new SlimeJsonResponse(slime);
    }

    /**
     * Returns the compile version for the application, optionally pinned to a major version
     * via the "allowMajor" parameter (must be an integer when present).
     */
    private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
        Slime slime = new Slime();
        OptionalInt allowMajor = OptionalInt.empty();
        if (allowMajorParam != null) {
            try {
                allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
            }
        }
        Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
        slime.setObject().setString("compileVersion", compileVersion.toFullString());
        return new SlimeJsonResponse(slime);
    }

    /**
     * Serializes one instance, with its deployment status, to the response.
     * (The body completes on the next chunk line.)
     */
    private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Slime slime = new
Slime(); // completes "Slime slime = new" from the instance(...) method started on the previous chunk line
        toSlime(slime.setObject(),
                getInstance(tenantName, applicationName, instanceName),
                controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)),
                request);
        return new SlimeJsonResponse(slime);
    }

    /**
     * Registers the PEM public key in the request body as a developer key for the
     * requesting user on the given cloud tenant; responds with the full key list.
     *
     * @throws IllegalArgumentException when the tenant is not a cloud tenant
     */
    private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        Principal user = request.getJDiscRequest().getUserPrincipal();
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Slime root = new Slime();
        // Key registration and response serialization both happen under the tenant lock.
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withDeveloperKey(developerKey, user);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /**
     * Asks the config server to validate a named tenant secret store against a concrete
     * deployment, forwarding aws-region/parameter-name from the request; wraps the config
     * server's JSON reply under "result". (The validateSecretStore call completes on the
     * next chunk line.)
     */
    private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
        var awsRegion = request.getProperty("aws-region");
        var parameterName = request.getProperty("parameter-name");
        var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
        var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
        var deploymentId = new DeploymentId(applicationId, zoneId);

        var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);

        var tenantSecretStore = tenant.tenantSecretStores()
                .stream()
                .filter(secretStore -> secretStore.getName().equals(secretStoreName))
                .findFirst();

        if (tenantSecretStore.isEmpty())
            return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");

        var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
        try {
            var responseRoot = new Slime();
            var responseCursor = responseRoot.setObject();
            responseCursor.setString("target", deploymentId.toString());
            var responseResultCursor = responseCursor.setObject("result");
            // Config server reply is raw JSON; copy it verbatim into the "result" object.
            var responseSlime = SlimeUtils.jsonToSlime(response);
            SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
            return new SlimeJsonResponse(responseRoot);
        } catch (JsonParseException e) {
            // Config server returned non-JSON: log and surface a generic error response.
            return ErrorResponses.logThrowing(request, log, e);
        }
    }

    /**
     * Removes the PEM public key in the request body from the cloud tenant's developer
     * keys; responds with the remaining key list.
     *
     * @throws IllegalArgumentException when the tenant is not a cloud tenant
     */
    private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withoutDeveloperKey(developerKey);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /** Serializes a key-to-principal map as an array of {key, user} objects. */
    private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
        keys.forEach((key, principal) -> {
            Cursor keyObject = keysArray.addObject();
            keyObject.setString("key", KeyUtils.toPem(key));
            keyObject.setString("user", principal.getName());
        });
    }

    /** Adds the PEM public key in the request body as a deploy key; responds with all deploy keys. */
    private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    /** Removes the PEM public key in the request body from the deploy keys; responds with the remaining keys. */
    private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withoutDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    /**
     * Configures a new named secret store (awsId/externalId/role from the request body)
     * for a cloud tenant. (The body completes on the next chunk line.)
     *
     * @throws IllegalArgumentException when the tenant is not a cloud tenant
     */
    private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        var data = toSlime(request.getData()).get();
        var awsId = mandatory("awsId", data).asString();
        var externalId = mandatory("externalId", data).asString();
        var role = mandatory("role",
data).asString(); // completes the mandatory("role", data) read from addSecretStore, started on the previous chunk line

        var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var tenantSecretStore = new TenantSecretStore(name, awsId, role);

        if (!tenantSecretStore.isValid()) {
            return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
        }
        if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
            return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
        }

        // External side effects first (IAM policy + secret service), then persist on the tenant.
        controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
        controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId);
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withSecretStore(tenantSecretStore);
            controller.tenants().store(lockedTenant);
        });

        // Re-read so the response reflects the stored state.
        tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var slime = new Slime();
        toSlime(slime.setObject(), tenant.tenantSecretStores());
        return new SlimeJsonResponse(slime);
    }

    /**
     * Deletes a named secret store from a cloud tenant: removes it from the secret
     * service and IAM policy, then from the stored tenant; responds with the remaining stores.
     */
    private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) {
        var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);

        var optionalSecretStore = tenant.tenantSecretStores().stream()
                .filter(secretStore -> secretStore.getName().equals(name))
                .findFirst();

        if (optionalSecretStore.isEmpty())
            return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found");

        var tenantSecretStore = optionalSecretStore.get();
        controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore);
        controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole());
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore);
            controller.tenants().store(lockedTenant);
        });

        tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class);
        var slime = new Slime();
        toSlime(slime.setObject(), tenant.tenantSecretStores());
        return new SlimeJsonResponse(slime);
    }

    /**
     * Sets the AWS role allowed to access the cloud tenant's archive bucket.
     *
     * @throws IllegalArgumentException when the tenant is not a cloud tenant
     */
    private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        var data = toSlime(request.getData()).get();
        var role = mandatory("role", data).asString();
        if (role.isBlank()) {
            return ErrorResponse.badRequest("AWS archive access role can't be whitespace only");
        }
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role));
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + ".");
    }

    /** Clears the AWS archive-access role for the cloud tenant. */
    private HttpResponse removeAwsArchiveAccess(String tenantName) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole());
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("AWS archive access role removed for tenant " + tenantName + ".");
    }

    /**
     * Sets the GCP member allowed to access the cloud tenant's archive bucket.
     *
     * @throws IllegalArgumentException when the tenant is not a cloud tenant
     */
    private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        var data = toSlime(request.getData()).get();
        var member = mandatory("member", data).asString();
        if (member.isBlank()) {
            return ErrorResponse.badRequest("GCP archive access role can't be whitespace only");
        }
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member));
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + ".");
    }

    /** Clears the GCP archive-access member for the cloud tenant. */
    private HttpResponse removeGcpArchiveAccess(String tenantName) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");

        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> {
            var access = lockedTenant.get().archiveAccess();
            lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember());
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("GCP archive access member removed for tenant " + tenantName + ".");
    }

    /**
     * Patches application-level fields (majorVersion, pemDeployKey) from the request
     * body; responds with a summary of applied changes. A majorVersion of 0 clears the
     * pinned major. (The body completes on the next chunk line.)
     */
    private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) {
        Inspector requestObject = toSlime(request.getData()).get();
        StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes.");
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            Inspector majorVersionField = requestObject.field("majorVersion");
            if (majorVersionField.valid()) {
                Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong();
                application = application.withMajorVersion(majorVersion);
                messageBuilder.add("Set major version to " + (majorVersion == null ?
"empty" : majorVersion)); } // completes the majorVersion branch of patchApplication, started on the previous chunk line

            Inspector pemDeployKeyField = requestObject.field("pemDeployKey");
            if (pemDeployKeyField.valid()) {
                String pemDeployKey = pemDeployKeyField.asString();
                PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
                application = application.withDeployKey(deployKey);
                messageBuilder.add("Added deploy key " + pemDeployKey);
            }

            controller.applications().store(application);
        });
        return new MessageResponse(messageBuilder.toString());
    }

    /** Looks up the application, throwing NotExistsException when absent. */
    private Application getApplication(String tenantName, String applicationName) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
        return controller.applications().getApplication(applicationId)
                         .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    /** Looks up the instance, throwing NotExistsException when absent. */
    private Instance getInstance(String tenantName, String applicationName, String instanceName) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        return controller.applications().getInstance(applicationId)
                         .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    /** Lists the node repository's nodes for one deployment, with per-node state and resource details. */
    private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id));

        Slime slime = new Slime();
        Cursor nodesArray = slime.setObject().setArray("nodes");
        for (Node node : nodes) {
            Cursor nodeObject = nodesArray.addObject();
            nodeObject.setString("hostname", node.hostname().value());
            nodeObject.setString("state", valueOf(node.state()));
            node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value()));
            nodeObject.setString("orchestration", valueOf(node.serviceState()));
            nodeObject.setString("version", node.currentVersion().toString());
            node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor));
            toSlime(node.resources(), nodeObject);
            nodeObject.setString("clusterId", node.clusterId());
            nodeObject.setString("clusterType", valueOf(node.clusterType()));
            nodeObject.setBool("down", node.down());
            // "retired" covers both already-retired and retirement-requested nodes.
            nodeObject.setBool("retired", node.retired() || node.wantToRetire());
            // Pending restart/reboot is signalled by wanted generation being ahead of current.
            nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration());
            nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration());
            nodeObject.setString("group", node.group());
            nodeObject.setLong("index", node.index());
        }
        return new SlimeJsonResponse(slime);
    }

    /**
     * Serializes per-cluster autoscaling state for one deployment: min/max/current
     * resources, target and suggested resources when relevant, utilization, scaling
     * events and metrics. (The target-emission condition completes on the next chunk line.)
     */
    private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id);

        Slime slime = new Slime();
        Cursor clustersObject = slime.setObject().setObject("clusters");
        for (Cluster cluster : application.clusters().values()) {
            Cursor clusterObject = clustersObject.setObject(cluster.id().value());
            clusterObject.setString("type", cluster.type().name());
            toSlime(cluster.min(), clusterObject.setObject("min"));
            toSlime(cluster.max(), clusterObject.setObject("max"));
            toSlime(cluster.current(), clusterObject.setObject("current"));
            // Emit the target only when it differs from current (ignoring non-numeric parts).
            if (cluster.target().isPresent() && !
diskSpeed) { // completes the valueOf(NodeResources.DiskSpeed diskSpeed) signature started on the previous chunk line
        return switch (diskSpeed) {
            case fast : yield "fast";
            case slow : yield "slow";
            case any  : yield "any";
        };
    }

    /** Maps a storage type to its wire name. */
    private static String valueOf(NodeResources.StorageType storageType) {
        return switch (storageType) {
            case remote : yield "remote";
            case local  : yield "local";
            case any    : yield "any";
        };
    }

    /** Streams config-server logs for one deployment directly to the client. */
    private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream outputStream) throws IOException {
                // try-with-resources on the log stream guarantees it is closed after transfer.
                try (logStream) {
                    logStream.transferTo(outputStream);
                }
            }

            @Override
            public long maxPendingBytes() {
                return 1 << 26; // 64 MiB response buffer cap
            }
        };
    }

    /** Returns the current support-access state for one deployment. */
    private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
    }

    /**
     * Grants support access to one deployment for 7 days, recorded against the
     * requesting principal. (The allow(...) call completes on the next chunk line.)
     */
    private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        Principal principal = requireUserPrincipal(request);
        Instant now = controller.clock().instant();
        SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
    }

    /**
     * Revokes support access for one deployment and re-triggers (or queues) its
     * deployment so any support credentials are rolled out of the deployment.
     */
    private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
        Principal principal = requireUserPrincipal(request);
        SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
        controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName());
        return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
    }

    /** Returns proton metrics for one deployment as JSON. */
    private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment);
        return buildResponseFromProtonMetrics(protonMetrics);
    }

    /**
     * Returns scaling events per cluster for one deployment, within an optional
     * [from, until] epoch-second window (defaults: epoch to now).
     * (The zone lookup completes on the next chunk line.)
     */
    private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        var from = Optional.ofNullable(request.getProperty("from"))
                .map(Long::valueOf)
                .map(Instant::ofEpochSecond)
                .orElse(Instant.EPOCH);
        var until = Optional.ofNullable(request.getProperty("until"))
                .map(Long::valueOf)
                .map(Instant::ofEpochSecond)
                .orElse(Instant.now(controller.clock()));
        var application = ApplicationId.from(tenantName, applicationName, instanceName);
        var zone = requireZone(environment, region);
        var deployment = new DeploymentId(application, zone);

        var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment);
        var slime = new Slime();
        var root = slime.setObject();
        for (var entry : events.entrySet()) {
            var serviceRoot = root.setArray(entry.getKey().clusterId().value());
            scalingEventsToSlime(entry.getValue(), serviceRoot);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Wraps a list of proton metrics as {"metrics": [...]}; returns 500 on serialization failure. */
    private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) {
        try {
            var jsonObject = jsonMapper.createObjectNode();
            var jsonArray = jsonMapper.createArrayNode();
            for (ProtonMetrics metrics : protonMetrics) {
                jsonArray.add(metrics.toJson());
            }
            jsonObject.set("metrics", jsonArray);
            return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject));
        } catch (JsonProcessingException e) {
            log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e);
            return new JsonResponse(500, "");
        }
    }

    /**
     * Triggers (or re-triggers) a deployment job. test/staging job types are normalized
     * to the system's own cloud; skipTests/skipRevision/skipUpgrade flags narrow what is
     * rolled out. (The trigger call completes on the next chunk line.)
     */
    private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
        ZoneRegistry zones = controller.zoneRegistry();
        // Normalize abstract test/staging job types to this system's cloud.
        type = switch (type.environment()) {
            case test -> JobType.systemTest(zones, zones.systemZone().getCloudName());
            case staging -> JobType.stagingTest(zones, zones.systemZone().getCloudName());
            default -> type;
        };
        Inspector requestObject = toSlime(request.getData()).get();
        boolean requireTests = ! requestObject.field("skipTests").asBool();
        boolean reTrigger = requestObject.field("reTrigger").asBool();
        boolean upgradeRevision = ! requestObject.field("skipRevision").asBool();
        boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool();
        String triggered = reTrigger ?
controller.applications().deploymentTrigger() // completes the reTrigger branch of trigger(...), started on the previous chunk line
                          .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName() :
                controller.applications().deploymentTrigger()
                          .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform)
                          .stream().map(job -> job.type().jobName()).collect(joining(", "));
        // Human-readable suffix describing which upgrades (revision/platform) were suppressed.
        String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") +
                                    (upgradeRevision ? "" : "revision") +
                                    ( ! upgradeRevision && ! upgradePlatform ? " and " : "") +
                                    (upgradePlatform ? "" : "platform") +
                                    ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : "");
        return new MessageResponse(triggered.isEmpty() ?
                                           "Job " + type.jobName() + " for " + id + " not triggered" :
                                           "Triggered " + triggered + " for " + id + suppressedUpgrades);
    }

    /** Pauses the given job for the maximum allowed pause duration. */
    private HttpResponse pause(ApplicationId id, JobType type) {
        Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
        controller.applications().deploymentTrigger().pauseJob(id, type, until);
        return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
    }

    /** Resumes a previously paused job. */
    private HttpResponse resume(ApplicationId id, JobType type) {
        controller.applications().deploymentTrigger().resumeJob(id, type);
        return new MessageResponse(type.jobName() + " for " + id + " resumed");
    }

    /**
     * Serializes one application: ids, latest version, in-flight and outstanding changes,
     * per-instance details, deploy keys, metrics, activity and ownership info.
     */
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("tenant", application.id().tenant().value());
        object.setString("application", application.id().application().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + application.id().tenant().value() +
                                                 "/application/" + application.id().application().value() +
                                                 "/job/", request.getUri()).toString());

        DeploymentStatus status = controller.jobController().deploymentStatus(application);
        application.revisions().last().ifPresent(version -> JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version));

        application.projectId().ifPresent(id -> object.setLong("projectId", id));

        // Deploying/outstanding change is reported from the first instance only —
        // presumably change is application-wide; verify against Instance.change() semantics.
        application.instances().values().stream().findFirst().ifPresent(instance -> {
            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), application);
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application);
        });

        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

        Cursor instancesArray = object.setArray("instances");
        for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                      : application.instances().values())
            toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request);

        application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString);

        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }

    /**
     * Serializes one instance: change/blocker state (when declared in the deployment spec),
     * rotation id, and its deployments. (This definition continues past this chunk.)
     */
    private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) {
        object.setString("instance", instance.name().value());

        if (deploymentSpec.instance(instance.name()).isPresent()) {
            Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values();

            if ( ! instance.change().isEmpty())
                toSlime(object.setObject("deploying"), instance.change(), status.application());
            if ( ! status.outstandingChange(instance.name()).isEmpty())
                toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application());

            // Change blockers from the spec: which days/hours block version and/or revision rollout.
            Cursor changeBlockers = object.setArray("changeBlockers");
            deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> {
                Cursor changeBlockerObject = changeBlockers.addObject();
                changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
                changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
                changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
                Cursor days = changeBlockerObject.setArray("days");
                changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
                Cursor hours = changeBlockerObject.setArray("hours");
                changeBlocker.window().hours().forEach(hours::addLong);
            }));
        }

        addRotationId(object, instance);

        // Deployments ordered per the spec when declared, otherwise in map order.
        List<Deployment> deployments = deploymentSpec.instance(instance.name())
                                                     .map(spec -> sortedDeployments(instance.deployments().values(), spec))
                                                     .orElse(List.copyOf(instance.deployments().values()));
        Cursor deploymentsArray = object.setArray("deployments");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = deploymentsArray.addObject();
            if (deployment.zone().environment() == Environment.prod && !
instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } } private void addRotationId(Cursor object, Instance instance) { instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); } private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) { Application application = status.application(); object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.revisions().last().ifPresent(version -> { version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); }); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if (application.deploymentSpec().instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! 
instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); addRotationId(object, instance); List<Deployment> deployments = application.deploymentSpec().instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); if (deployment.zone().environment() == Environment.prod) { if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); } if ( ! recurseOverDeployments(request) && ! 
instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); } } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } Stream.concat(status.jobSteps().keySet().stream() .filter(job -> job.application().instance().equals(instance.name())) .filter(job -> job.type().isProduction() && job.type().isDeployment()), controller.jobController().active(instance.id()).stream() .map(run -> run.id().job()) .filter(job -> job.type().environment().isManuallyDeployed())) .map(job -> job.type().zone()) .filter(zone -> ! 
instance.deployments().containsKey(zone)) .forEach(zone -> { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", zone.environment().value()); deploymentObject.setString("region", zone.region().value()); }); application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key))); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region)); 
Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Change change, Application application) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision))); } private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); object.setBool("legacy", endpoint.legacy()); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())); boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints"); var endpointArray = response.setArray("endpoints"); EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId) .scope(Endpoint.Scope.zone); if (!legacyEndpoints) { zoneEndpoints = zoneEndpoints.not().legacy().direct(); } for (var 
endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); } EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId); if (!legacyEndpoints) { declaredEndpoints = declaredEndpoints.not().legacy().direct(); } for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); } response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString()); response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", application.revisions().get(deployment.revision()).stringId()); response.setLong("build", deployment.revision().number()); Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment); response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli())); application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); var instance = application.instances().get(deploymentId.applicationId().instance()); if (instance != null) { if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response); if (!deployment.zone().environment().isManuallyDeployed()) { DeploymentStatus status = 
controller.jobController().deploymentStatus(application); JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone())); Optional.ofNullable(status.jobSteps().get(jobId)) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision())); if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true)) response.setString("status", "pending"); else response.setString("status", "running"); }); } else { var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId())); deploymentRun.ifPresent(run -> { response.setString("status", run.hasEnded() ? "complete" : "running"); }); } } response.setDouble("quota", deployment.quota().rate()); deployment.cost().ifPresent(cost -> response.setDouble("cost", cost)); controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false) .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString())); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); 
metricsObject.setDouble("documentCount", metrics.documentCount());
metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
}

/** Writes the rotation state of a single deployment under a "bcpStatus" object. */
private void toSlime(RotationState state, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", rotationStateString(state));
}

/** Serializes per-rotation endpoint status for the given deployment into an "endpointStatus" array. */
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) {
    var array = object.setArray("endpointStatus");
    for (var rotation : rotations) {
        var statusObject = array.addObject();
        var targets = status.of(rotation.rotationId());
        statusObject.setString("endpointId", rotation.endpointId().id());
        statusObject.setString("rotationId", rotation.rotationId().asString());
        statusObject.setString("clusterId", rotation.clusterId().value());
        statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment)));
        statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli());
    }
}

/** Returns the monitoring-system URI for the given deployment, as configured in the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}

/**
 * Sets the global routing status (in/out of service) for the given deployment.
 * The acting agent is recorded as operator or tenant depending on the caller's role.
 * Throws NotExistsException if the instance has no deployment in the zone.
 */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName,
                                               String environment, String region, boolean inService, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = requireZone(environment, region);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    DeploymentId deploymentId = new DeploymentId(instance.id(), zone);
    RoutingStatus.Agent agent = isOperator(request) ?
RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
controller.routing().of(deploymentId).setRoutingStatus(status, agent);
return new MessageResponse(Text.format("Successfully set %s in %s %s service",
                                       instance.id().toShortString(), zone, inService ? "in" : "out of"));
}

/**
 * Returns the global rotation override status for the deployment's primary rotation endpoint,
 * serialized as a "globalrotationoverride" array of upstream name followed by a status object.
 * The array stays empty if the application has no rotation-backed endpoint.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName,
                                               String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
                                                   .requiresRotation()
                                                   .primary();
    if (primaryEndpoint.isPresent()) {
        DeploymentRoutingContext context = controller.routing().of(deploymentId);
        RoutingStatus status = context.routingStatus();
        array.addString(primaryEndpoint.get().upstreamName(deploymentId));
        Cursor statusObject = array.addObject();
        statusObject.setString("status", status.value().name());
        statusObject.setString("reason", ""); // reason is not tracked here; always empty
        statusObject.setString("agent", status.agent().name());
        statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
    }
    return new SlimeJsonResponse(slime);
}

/**
 * Returns the rotation status of the given (optionally named) endpoint for this deployment.
 * Throws NotExistsException if the instance has no deployment in the zone.
 */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName,
                                    String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance +
" has no deployment in " + zone);
}
Slime slime = new Slime();
Cursor response = slime.setObject();
toSlime(instance.rotationStatus().of(rotation, deployment), response);
return new SlimeJsonResponse(slime);
}

/** Returns the change (platform and/or application revision) currently rolling out to this instance, if any. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! instance.change().isEmpty()) {
        instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
        instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", instance.change().isPinned());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns whether the given deployment is currently suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName,
                               String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    boolean suspended = controller.applications().isSuspended(deploymentId);
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setBool("suspended", suspended);
    return new SlimeJsonResponse(slime);
}

/** Proxies a service's /status page from the config server for a node of this deployment. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment,
                            String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName,
            DomainName.of(host), HttpURL.Path.parse("/status").append(restPath),
            Query.empty().add(request.getJDiscRequest().parameters()));
}

private HttpResponse orchestrator(String tenantName,
String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodes(deploymentId); } private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Query query = Query.empty().add(request.getJDiscRequest().parameters()); query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString()); return controller.serviceRegistry().configServer().getServiceNodePage( deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query); } private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri()); } private HttpResponse updateTenant(String tenantName, HttpRequest request) { getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createTenant(String tenantName, HttpRequest 
request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); if (controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), user.email())); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); if (controller.applications().getApplication(applicationId).isEmpty()) createApplication(tenantName, applicationName, request); controller.applications().createInstance(applicationId.instance(instanceName)); Slime slime = new Slime(); toSlime(applicationId.instance(instanceName), slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the given Vespa version if a valid 
one is given, e.g., "7.8.9". */ private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) { String versionString = readToString(request.getData()); ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Version version = Version.fromString(versionString); VersionStatus versionStatus = controller.readVersionStatus(); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(versionStatus); if (!versionStatus.isActive(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + versionStatus.versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPin(); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. */ private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Inspector buildField = toSlime(request.getData()).get().field("build"); long build = buildField.valid() ? buildField.asLong() : -1; StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { RevisionId revision = build == -1 ? 
application.get().revisions().last().get().id() :
getRevision(application.get(), build);
Change change = Change.of(revision);
controller.applications().deploymentTrigger().forceChange(id, change);
response.append("Triggered ").append(change).append(" for ").append(id);
});
return new MessageResponse(response.toString());
}

/**
 * Returns the production-deployable revision with the given build number.
 * Throws IllegalArgumentException if no such build exists, or if its package
 * is no longer present in the application store.
 */
private RevisionId getRevision(Application application, long build) {
    return application.revisions().withPackage().stream()
                      .map(ApplicationVersion::id)
                      .filter(version -> version.number() == build)
                      .findFirst()
                      .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(),
                                                                                              application.id().application(),
                                                                                              build))
                      .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}

/** Marks the given build as skipped, so it will not be deployed. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
    });
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}

/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // Upper-case with a fixed locale: under the default locale (e.g. Turkish) 'i' maps to 'İ',
        // which would make Enum.valueOf reject valid choices such as "application".
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase(java.util.Locale.ROOT));
controller.applications().deploymentTrigger().cancelChange(id, cancel);
response.append("Changed deployment from '").append(change).append("' to '")
        .append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
});
return new MessageResponse(response.toString());
}

/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment,
                             String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // "clusterId" and "documentType" are optional comma-separated filters; blank entries are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .toList();
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .toList();
    // Optional reindexing speed; null means "use the default".
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
                               (indexedOnly ? ", for indexed types" : "") +
                               (speed != null ? ", with speed " + speed : ""));
}

/** Gets reindexing status of an application in a zone.
*/ private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setBool("enabled", reindexing.enabled()); Cursor clustersArray = root.setArray("clusters"); reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = readyArray.addObject(); readyObject.setString("type", ready.getKey()); setStatus(readyObject, ready.getValue()); }); }); return new SlimeJsonResponse(slime); } void setStatus(Cursor statusObject, ApplicationReindexing.Status status) { status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli())); status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli())); status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli())); status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state)); status.message().ifPresent(message -> statusObject.setString("message", message)); status.progress().ifPresent(progress 
-> statusObject.setDouble("progress", progress));
        status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
    }

    /** Maps a reindexing state to the lower-case string used on the wire. */
    private static String toString(ApplicationReindexing.State state) {
        return switch (state) {
            case PENDING: yield "pending";
            case RUNNING: yield "running";
            case FAILED: yield "failed";
            case SUCCESSFUL: yield "successful";
        };
    }

    /** Enables reindexing of an application in a zone. */
    private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().enableReindexing(id, zone);
        return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
    }

    /** Disables reindexing of an application in a zone. */
    private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = requireZone(environment, region);
        controller.applications().disableReindexing(id, zone);
        return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     requireZone(environment, region));
        // Each filter field is optional; absent query parameters mean "restart everything".
        RestartFilter restartFilter = new RestartFilter()
                .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
                .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
.withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); controller.applications().restart(deploymentId, restartFilter); return new MessageResponse("Requested restart of " + deploymentId); } /** Set suspension status of the given deployment. */ private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().setSuspension(deploymentId, suspend); return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId); } private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) { if ( ! type.environment().isManuallyDeployed() && ! isOperator(request)) throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments."); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), Optional.of(id.instance()), Optional.of(type.zone()), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); ensureApplicationExists(TenantAndApplicationId.from(id), request); boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("dryRun", options)) .map(Boolean::valueOf) .orElse(false); controller.jobController().deploy(id, type, version, applicationPackage, dryRun); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time."); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } ActivateResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); return new SlimeJsonResponse(toSlime(result)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && !isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id 
= TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); } private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) { Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); } return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().deactivate(id.applicationId(), id.zoneId()); controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId())) .filter(run -> ! run.hasEnded()) .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName())); return new MessageResponse("Deactivated " + id); } /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. 
*/ private HttpResponse testConfig(ApplicationId id, JobType type) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id)); ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent() ? id : TenantAndApplicationId.from(id).defaultInstance(); HashSet<DeploymentId> deployments = controller.applications() .getInstance(prodInstanceId).stream() .flatMap(instance -> instance.productionDeployments().keySet().stream()) .map(zone -> new DeploymentId(prodInstanceId, zone)) .collect(Collectors.toCollection(HashSet::new)); ApplicationId toTest = type.isProduction() ? prodInstanceId : id; if ( ! type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone())); Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone()); if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone()); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, deployment.version(), deployment.revision(), deployment.at(), controller.routing().readTestRunnerEndpointsOf(deployments), controller.applications().reachableContentClustersByZone(deployments))); } private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null); if (report != null) { Cursor cursor = report.get(); boolean force = request.getBooleanProperty("force"); if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) { throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString()); } } Slime requestPayload; try { 
requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes()); } catch (Exception e) { throw new IllegalArgumentException("Missing or invalid JSON in request content", e); } Cursor requestPayloadCursor = requestPayload.get(); String configId = requestPayloadCursor.field("configId").asString(); long expiresAt = requestPayloadCursor.field("expiresAt").asLong(); if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); int artifactEntries = artifactsCursor.entries(); if (artifactEntries == 0) { throw new IllegalArgumentException("Missing or empty 'artifacts'"); } Slime dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); } var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); } private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId 
zone = requireZone(environment, region);
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
                .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
        return new SlimeJsonResponse(report);
    }

    // Polls the node's "serviceDump" report until it records either completion or failure,
    // sleeping between polls, then returns the final report as a JSON response.
    private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                                  String application, String instance, String hostname) {
        int pollInterval = 2; // seconds between report polls
        Slime report;
        while (true) {
            // NOTE(review): the unchecked get() assumes the report cannot disappear between polls;
            // a concurrent removal would surface as NoSuchElementException — confirm this is intended.
            report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
            Cursor cursor = report.get();
            if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
                break;
            }
            final Slime copyForLambda = report;
            log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
            log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
            controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
        }
        return new SlimeJsonResponse(report);
    }

    // Reads the "serviceDump" report for the given node, after verifying that the node exists
    // and is owned by the given application instance. Empty when the node has no such report.
    private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                      String application, String instance, String hostname) {
        Node node;
        try {
            node = nodeRepository.getNode(zone, hostname);
        } catch (IllegalArgumentException e) {
            // Unknown host names are reported as 404 rather than 400.
            throw new NotExistsException(hostname);
        }
        ApplicationId app = ApplicationId.from(tenant, application, instance);
        ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
        if (!app.equals(owner)) {
            throw new IllegalArgumentException("Node is not owned by " + app.toFullString());
        }
        String json = node.reports().get("serviceDump");
        if (json == null) return Optional.empty();
        return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
    }

    // Parses a mandatory { repository, branch, commit } object into a SourceRevision.
    private static SourceRevision toSourceRevision(Inspector object) {
        if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) {
            throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
        }
return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); 
toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role)); toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, 
controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) { utilizationObject.setDouble("cpu", utilization.cpu()); utilizationObject.setDouble("idealCpu", utilization.idealCpu()); utilizationObject.setDouble("currentCpu", utilization.currentCpu()); utilizationObject.setDouble("peakCpu", utilization.peakCpu()); utilizationObject.setDouble("memory", utilization.memory()); utilizationObject.setDouble("idealMemory", utilization.idealMemory()); utilizationObject.setDouble("currentMemory", utilization.currentMemory()); utilizationObject.setDouble("peakMemory", utilization.peakMemory()); utilizationObject.setDouble("disk", utilization.disk()); utilizationObject.setDouble("idealDisk", utilization.idealDisk()); utilizationObject.setDouble("currentDisk", utilization.currentDisk()); utilizationObject.setDouble("peakDisk", utilization.peakDisk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor 
object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> 
object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal 
requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } 
Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); 
tenantSecretStores.forEach(store -> {
            toSlime(secretStore.addObject(), store);
        });
    }

    /** Renders the tenant's container role and one account entry per secret store. */
    private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) {
        object.setString("tenantRole", tenantRoles.containerRole());
        var stores = object.setArray("accounts");
        tenantSecretStores.forEach(secretStore -> {
            toSlime(stores.addObject(), secretStore);
        });
    }

    private void toSlime(Cursor object, TenantSecretStore secretStore) {
        object.setString("name", secretStore.getName());
        object.setString("awsId", secretStore.getAwsId());
        object.setString("role", secretStore.getRole());
    }

    /** Reads the entire stream as one string, or returns null when the stream is empty. */
    private String readToString(InputStream stream) {
        // Fix: decode with an explicit charset; the no-arg Scanner constructor falls back to the
        // platform default, which is not guaranteed to be UTF-8 before Java 18.
        Scanner scanner = new Scanner(stream, java.nio.charset.StandardCharsets.UTF_8).useDelimiter("\\A");
        if ( ! scanner.hasNext()) return null;
        return scanner.next();
    }

    // recursive=tenant implies application and deployment recursion as well.
    private static boolean recurseOverTenants(HttpRequest request) {
        return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
    }

    private static boolean recurseOverApplications(HttpRequest request) {
        return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
    }

    private static boolean recurseOverDeployments(HttpRequest request) {
        return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
    }

    private static boolean showOnlyProductionInstances(HttpRequest request) {
        return "true".equals(request.getProperty("production"));
    }

    private static boolean showOnlyActiveInstances(HttpRequest request) {
        return "true".equals(request.getProperty("activeInstances"));
    }

    private static boolean includeDeleted(HttpRequest request) {
        return "true".equals(request.getProperty("includeDeleted"));
    }

    /** Maps a tenant type to the upper-case string used on the wire. */
    private static String tenantType(Tenant tenant) {
        return switch (tenant.type()) {
            case athenz: yield "ATHENS";
            case cloud: yield "CLOUD";
            case deleted: yield "DELETED";
        };
    }

    private static ApplicationId appIdFromPath(Path path) {
        return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
    }
/** Resolves the {jobtype} path segment to a JobType, using the zone registry for zone-specific jobs. */
private JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
}

/** Builds a RunId from the {tenant}/{application}/{instance}/{jobtype}/{number} path segments. */
private RunId runIdFromPath(Path path) {
    long number = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}

/**
 * Handles an application submission: parses the multipart payload (submit options
 * JSON, application zip, test zip), validates the optional source URL, verifies the
 * application identity configuration against the caller, ensures the application
 * exists, and registers the submission with the job controller.
 *
 * @throws IllegalArgumentException if a given sourceUrl lacks scheme or host
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = parseDataParts(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    long projectId = submitOptions.field("projectId").asLong();
    // A missing/zero project id defaults to 1.
    projectId = projectId == 0 ? 1 : projectId;
    Optional<String> repository = optional("repository", submitOptions);
    Optional<String> branch = optional("branch", submitOptions);
    Optional<String> commit = optional("commit", submitOptions);
    // A source revision is only recorded when repository, branch and commit are all present.
    Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
            ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
            : Optional.empty();
    Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
    Optional<String> authorEmail = optional("authorEmail", submitOptions);
    Optional<String> description = optional("description", submitOptions);
    int risk = (int) submitOptions.field("risk").asLong();
    sourceUrl.map(URI::create).ifPresent(url -> {
        if (url.getHost() == null || url.getScheme() == null)
            throw new IllegalArgumentException("Source URL must include scheme and host");
    });
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
    byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
    Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk);
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                    Optional.empty(),
                                                                    Optional.empty(),
                                                                    applicationPackage,
                                                                    Optional.of(requireUserPrincipal(request)));
    TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
    ensureApplicationExists(id, request);
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
}

/**
 * Removes all production deployments of the given application by submitting a
 * special deployment-removal application package (empty test package, project id 0).
 */
private HttpResponse removeAllProdDeployments(String tenant, String application) {
    JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                 TenantAndApplicationId.from(tenant, application),
                                                 new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(),
                                                                Optional.empty(), Optional.empty(), Optional.empty(), 0),
                                                 0);
    return new MessageResponse("All deployments removed");
}

/** Convenience overload: builds the zone from environment and region, then validates it. */
private ZoneId requireZone(String environment, String region) {
    return requireZone(ZoneId.from(environment, region));
}

/**
 * Validates that the zone exists in this system.
 * The synthetic prod "controller" zone is always accepted without a registry lookup.
 *
 * @throws IllegalArgumentException if the zone is unknown
 */
private ZoneId requireZone(ZoneId zone) {
    if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
        return zone;
    }
    if (!controller.zoneRegistry().hasZone(zone)) {
        throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
    }
    return zone;
}

/**
 * Parses the request's multipart body into named byte-array parts.
 * If an X-Content-Hash header is present, the body is digested (SHA-256) while
 * being parsed, and the digest must match the base64-decoded header value.
 *
 * @throws IllegalArgumentException if the content hash does not match
 */
private static Map<String, byte[]> parseDataParts(HttpRequest request) {
    String contentHash = request.getHeader("X-Content-Hash");
    if (contentHash == null)
        return new MultipartParser().parse(request);
    // Digest the body as it streams through the multipart parser.
    DigestInputStream digester = Signatures.sha256Digester(request.getData());
    var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
    if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
        throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");
    return dataParts;
}

/**
 * Finds the rotation id for the instance, optionally selecting by endpoint id.
 * With no endpoint id given, the instance must have exactly one rotation.
 *
 * @throws NotExistsException if the instance has no rotations, or the named endpoint is unknown
 * @throws IllegalArgumentException if multiple rotations exist and no endpoint id was given
 */
private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
    if (instance.rotations().isEmpty()) {
        throw new NotExistsException("global rotation does not exist for " + instance);
    }
    if (endpointId.isPresent()) {
        return instance.rotations().stream()
                .filter(r -> r.endpointId().id().equals(endpointId.get()))
                .map(AssignedRotation::rotationId)
                .findFirst()
                .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                          " does not exist for " + instance));
    } else if (instance.rotations().size() > 1) {
        throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
    }
    return instance.rotations().get(0).rotationId();
}

/** Maps a rotation state to its serialized API name. */
private static String rotationStateString(RotationState state) {
    return switch (state) {
        case in: yield "IN";
        case out: yield "OUT";
        case unknown: yield "UNKNOWN";
    };
}

/** Maps an endpoint scope to its serialized API name. */
private static String endpointScopeString(Endpoint.Scope scope) {
    return switch (scope) {
        case weighted: yield "weighted";
        case application: yield "application";
        case global: yield "global";
        case zone: yield "zone";
    };
}

/** Maps a routing method to its serialized API name. */
private static String routingMethodString(RoutingMethod method) {
    return switch (method) {
        case exclusive: yield "exclusive";
        case sharedLayer4: yield "sharedLayer4";
    };
}

/**
 * Fetches a typed attribute from the jDisc request context.
 *
 * @throws IllegalArgumentException if the attribute is absent or not of the given type
 */
private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
    return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
                   .filter(cls::isInstance)
                   .map(cls::cast)
                   .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
}

/** Returns whether given request is by an operator */
private static boolean isOperator(HttpRequest request) {
    var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
    return securityContext.roles().stream()
                          .map(Role::definition)
                          .anyMatch(definition -> definition == RoleDefinition.hostedOperator);
}

/**
 * Creates the application on first use (public system behavior): if it does not
 * exist yet, resolves the caller's credentials and creates it.
 */
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) {
    if (controller.applications().getApplication(id).isEmpty()) {
        log.fine("Application does not exist in public, creating: " + id);
        var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest());
        controller.applications().createApplication(id, credentials);
    }
}

/**
 * Sorts deployments in the order their zones are declared in the deployment spec.
 * Deployments in zones not present in the spec sort first (indexOf yields -1).
 * Returns an unmodifiable list.
 */
private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
    List<ZoneId> productionZones = spec.zones().stream()
                                       .filter(z -> z.region().isPresent())
                                       .map(z -> ZoneId.from(z.environment(), z.region().get()))
                                       .toList();
    return deployments.stream()
                      .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
                      .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}

}
Or is selecting the eligible deployment by its oldest `lastTriggered` timestamp unnecessary here?
/**
 * Entry point for every application/v4 request (audit-logged by the superclass).
 * Parses the request path, dispatches on the HTTP method, and maps known
 * exception types to the corresponding HTTP error responses.
 *
 * @param request the incoming HTTP request
 * @return the response from the matched handler, or an error response
 */
public HttpResponse auditAndHandle(HttpRequest request) {
    try {
        Path path = new Path(request.getUri());
        return switch (request.getMethod()) {
            case GET: yield handleGET(path, request);
            case PUT: yield handlePUT(path, request);
            case POST: yield handlePOST(path, request);
            case PATCH: yield handlePATCH(path, request);
            case DELETE: yield handleDELETE(path, request);
            case OPTIONS: yield handleOPTIONS();
            default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        };
    }
    catch (RestApiException.Forbidden e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (RestApiException.Unauthorized e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        return switch (e.code()) {
            case NOT_FOUND: yield ErrorResponse.notFoundError(Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT: yield new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
            // Consistency fix: delegate to ErrorResponses.logThrowing, as the sibling
            // implementation of this dispatcher does, so unexpected config server
            // failures are logged with full request context instead of being mapped
            // to a bare internalServerError.
            case INTERNAL_SERVER_ERROR: yield ErrorResponses.logThrowing(request, log, e);
            default: yield new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
        };
    }
    catch (RuntimeException e) {
        // Fix: ErrorResponses.logThrowing logs the throwable together with the request
        // and returns a generic 500, replacing the ad-hoc log.log + internalServerError.
        return ErrorResponses.logThrowing(request, log, e);
    }
}
return ErrorResponse.internalServerError("Unexpected error occurred");
/**
 * Audit-logged entry point for application/v4: routes the request to the
 * method-specific handler and converts known failures into HTTP error responses.
 *
 * @param request the incoming HTTP request
 * @return the handler's response, or a mapped error response
 */
public HttpResponse auditAndHandle(HttpRequest request) {
    try {
        Path requestPath = new Path(request.getUri());
        // Dispatch purely on the HTTP method; each handler does its own path matching.
        return switch (request.getMethod()) {
            case GET -> handleGET(requestPath, request);
            case PUT -> handlePUT(requestPath, request);
            case POST -> handlePOST(requestPath, request);
            case PATCH -> handlePATCH(requestPath, request);
            case DELETE -> handleDELETE(requestPath, request);
            case OPTIONS -> handleOPTIONS();
            default -> ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
        };
    }
    catch (RestApiException.Forbidden e) {
        return ErrorResponse.forbidden(Exceptions.toMessageString(e));
    }
    catch (RestApiException.Unauthorized e) {
        return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
    }
    catch (NotExistsException e) {
        return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
    }
    catch (IllegalArgumentException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
    catch (ConfigServerException e) {
        // Translate config server error codes; unexpected server-side failures are
        // logged with request context via ErrorResponses.logThrowing.
        return switch (e.code()) {
            case NOT_FOUND -> ErrorResponse.notFoundError(Exceptions.toMessageString(e));
            case ACTIVATION_CONFLICT -> new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
            case INTERNAL_SERVER_ERROR -> ErrorResponses.logThrowing(request, log, e);
            default -> new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
        };
    }
    catch (RuntimeException e) {
        return ErrorResponses.logThrowing(request, log, e);
    }
}
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx, controller.auditLogger()); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if 
(path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), 
path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), 
path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) 
return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), 
path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant")); if 
(path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), 
path.get("application"), path.get("instance"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyResponse response = new EmptyResponse(); response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); List<Application> applications = controller.applications().asList(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).collect(toList()), request); return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return recurseOverTenants(request) ? 
           recursiveRoot(request) :
           new ResourceResponse(request, "tenant");
    }

    /** Lists all tenants, optionally including deleted ones (see includeDeleted(request)). */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Renders the named tenant, or 404 if it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                .map(tenant -> tenant(tenant, request))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Renders a single tenant together with all its applications. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
        return new SlimeJsonResponse(slime);
    }

    /**
     * Renders the access-request state for a cloud tenant: the managed-access flag,
     * any pending membership request, and the audit log entries.
     * Returns 400 for non-cloud tenants.
     */
    private HttpResponse accessRequests(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var accessControlService = controller.serviceRegistry().accessControlService();
        var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
        var managedAccess = accessControlService.getManagedAccess(tenant);
        var slime = new Slime();
        var cursor = slime.setObject();
        cursor.setBool("managedAccess", managedAccess);
        // Optional pending request: rendered as a "pendingRequest" object when present.
        accessRoleInformation.getPendingRequest()
                .ifPresent(membershipRequest -> {
                    var requestCursor = cursor.setObject("pendingRequest");
                    requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                    requestCursor.setString("reason", membershipRequest.getReason());
                });
        var auditLogCursor = cursor.setArray("auditLog");
        accessRoleInformation.getAuditLog()
                .forEach(auditLogEntry -> {
                    var entryCursor = auditLogCursor.addObject();
                    entryCursor.setString("created",
                            auditLogEntry.getCreationTime());
                    entryCursor.setString("approver", auditLogEntry.getApprover());
                    entryCursor.setString("reason", auditLogEntry.getReason());
                    entryCursor.setString("status", auditLogEntry.getAction());
                });
        return new SlimeJsonResponse(slime);
    }

    /**
     * Requests ssh access to the given tenant. Restricted to operators, and only
     * supported for cloud tenants (400 otherwise).
     */
    private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
        if (!isOperator(request)) {
            return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
        }
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only request access for cloud tenants");
        controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
        return new MessageResponse("OK");
    }

    /**
     * Approves or rejects a pending ssh access request for a cloud tenant.
     * Request body fields: optional "expiry" (epoch millis) and "approve" (boolean).
     */
    private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var inspector = toSlime(request.getData()).get();
        var expiry = inspector.field("expiry").valid() ?
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); } private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); } private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); } private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privel for cloud tenants"); controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { 
infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(getString(inspector.field("contact").field("email"), info.contact().email())); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("name"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) 
.withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() .withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() 
.withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } } private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); } private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); } private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); 
contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); } private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { return field.valid() ? 
field.asString().trim() : defaultVale; } private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email())); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new 
IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) { if (!insp.valid()) return oldContact; String email = getString(insp.field("email"), oldContact.email()); if (!email.isBlank() && !email.contains("@")) { throw new IllegalArgumentException("'email' needs to be an email address"); } return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(getString(insp.field("email"), oldContact.email())) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, oldContact.contact())) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); } private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); if (!email.contains("@")) { throw new IllegalArgumentException("'email' needs to be an email address"); } return new TenantContacts.EmailContact(audiences, email); }).toList(); return new TenantContacts(contacts); } private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) 
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream()) .filter(notification -> propertyEquals(request, "application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); } private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); } private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); 
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); } private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; } private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; } private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ? 
controller.applications().asList(tenant) : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get())) .map(List::of) .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist")); Slime slime = new Slime(); Cursor applicationArray = slime.setArray(); for (Application application : applications) { Cursor applicationObject = applicationArray.addObject(); applicationObject.setString("tenant", application.id().tenant().value()); applicationObject.setString("application", application.id().application().value()); applicationObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); Cursor instanceArray = applicationObject.setArray("instances"); for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet() : application.instances().keySet()) { Cursor instanceObject = instanceArray.addObject(); instanceObject.setString("instance", instance.value()); instanceObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + instance.value(), request.getUri()).toString()); } } return new SlimeJsonResponse(slime); } private HttpResponse devApplicationPackage(ApplicationId id, JobType type) { ZoneId zone = type.zone(); RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision(); byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision); return new ZipResponse(id.toFullString() + "." 
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); } private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); } private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new 
Slime(); toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request); return new SlimeJsonResponse(slime); } private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); Principal user = request.getJDiscRequest().getUserPrincipal(); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withDeveloperKey(developerKey, user); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) { var awsRegion = request.getProperty("aws-region"); var parameterName = request.getProperty("parameter-name"); var applicationId = ApplicationId.fromFullString(request.getProperty("application-id")); var zoneId = requireZone(ZoneId.from(request.getProperty("zone"))); var deploymentId = new DeploymentId(applicationId, zoneId); var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class); var tenantSecretStore = tenant.tenantSecretStores() .stream() .filter(secretStore -> secretStore.getName().equals(secretStoreName)) .findFirst(); if (tenantSecretStore.isEmpty()) return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'"); var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, 
parameterName); try { var responseRoot = new Slime(); var responseCursor = responseRoot.setObject(); responseCursor.setString("target", deploymentId.toString()); var responseResultCursor = responseCursor.setObject("result"); var responseSlime = SlimeUtils.jsonToSlime(response); SlimeUtils.copyObject(responseSlime.get(), responseResultCursor); return new SlimeJsonResponse(responseRoot); } catch (JsonParseException e) { return ErrorResponse.internalServerError(response); } } private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withoutDeveloperKey(developerKey); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private void toSlime(Cursor keysArray, Map<PublicKey, ? 
extends Principal> keys) { keys.forEach((key, principal) -> { Cursor keyObject = keysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", principal.getName()); }); } private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withoutDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var awsId = mandatory("awsId", data).asString(); var externalId = mandatory("externalId", data).asString(); var role = mandatory("role", 
data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = 
lockedTenant.withoutSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var role = mandatory("role", data).asString(); if (role.isBlank()) { return ErrorResponse.badRequest("AWS archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role)); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + "."); } private HttpResponse removeAwsArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole()); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role removed for tenant " + tenantName + "."); } private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + 
tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var member = mandatory("member", data).asString(); if (member.isBlank()) { return ErrorResponse.badRequest("GCP archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member)); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + "."); } private HttpResponse removeGcpArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember()); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member removed for tenant " + tenantName + "."); } private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes."); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { Inspector majorVersionField = requestObject.field("majorVersion"); if (majorVersionField.valid()) { Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); application = application.withMajorVersion(majorVersion); messageBuilder.add("Set major version to " + (majorVersion == null ? 
"empty" : majorVersion)); } Inspector pemDeployKeyField = requestObject.field("pemDeployKey"); if (pemDeployKeyField.valid()) { String pemDeployKey = pemDeployKeyField.asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); application = application.withDeployKey(deployKey); messageBuilder.add("Added deploy key " + pemDeployKey); } controller.applications().store(application); }); return new MessageResponse(messageBuilder.toString()); }

    /** Looks up the application identified by tenant and application name, failing with NotExistsException if absent. */
    private Application getApplication(String tenantName, String applicationName) {
        TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
        return controller.applications()
                         .getApplication(id)
                         .orElseThrow(() -> new NotExistsException(id + " not found"));
    }

    /** Looks up the instance identified by tenant, application and instance name, failing with NotExistsException if absent. */
    private Instance getInstance(String tenantName, String applicationName, String instanceName) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        return controller.applications()
                         .getInstance(id)
                         .orElseThrow(() -> new NotExistsException(id + " not found"));
    }

    private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id)); Slime slime = new Slime(); Cursor nodesArray = slime.setObject().setArray("nodes"); for (Node node : nodes) { Cursor nodeObject = nodesArray.addObject(); nodeObject.setString("hostname", node.hostname().value()); nodeObject.setString("state", valueOf(node.state())); node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value())); nodeObject.setString("orchestration", valueOf(node.serviceState())); nodeObject.setString("version", node.currentVersion().toString());
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor)); toSlime(node.resources(), nodeObject); nodeObject.setString("clusterId", node.clusterId()); nodeObject.setString("clusterType", valueOf(node.clusterType())); nodeObject.setBool("down", node.down()); nodeObject.setBool("retired", node.retired() || node.wantToRetire()); nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration()); nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration()); nodeObject.setString("group", node.group()); nodeObject.setLong("index", node.index()); } return new SlimeJsonResponse(slime); } private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id); Slime slime = new Slime(); Cursor clustersObject = slime.setObject().setObject("clusters"); for (Cluster cluster : application.clusters().values()) { Cursor clusterObject = clustersObject.setObject(cluster.id().value()); clusterObject.setString("type", cluster.type().name()); toSlime(cluster.min(), clusterObject.setObject("min")); toSlime(cluster.max(), clusterObject.setObject("max")); toSlime(cluster.current(), clusterObject.setObject("current")); if (cluster.target().isPresent() && ! 
cluster.target().get().justNumbers().equals(cluster.current().justNumbers())) toSlime(cluster.target().get(), clusterObject.setObject("target")); cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested"))); utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization")); scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents")); clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode()); clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus()); clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis()); clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate()); clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax()); } return new SlimeJsonResponse(slime); }

    /** Maps a node state to its wire-format string, rejecting any state not known to this API. */
    private static String valueOf(Node.State state) {
        return switch (state) {
            case failed -> "failed";
            case parked -> "parked";
            case dirty -> "dirty";
            case ready -> "ready";
            case active -> "active";
            case inactive -> "inactive";
            case reserved -> "reserved";
            case provisioned -> "provisioned";
            case breakfixed -> "breakfixed";
            case deprovisioned -> "deprovisioned";
            default -> throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
        };
    }

    /** Maps an orchestration service state to its wire-format string; anything unrecognized becomes "unknown". */
    static String valueOf(Node.ServiceState state) {
        return switch (state) {
            case expectedUp -> "expectedUp";
            case allowedDown -> "allowedDown";
            case permanentlyDown -> "permanentlyDown";
            case unorchestrated -> "unorchestrated";
            default -> "unknown";
        };
    }

    /** Maps a node cluster type to its wire-format string, rejecting the 'unknown' type. */
    private static String valueOf(Node.ClusterType type) {
        return switch (type) {
            case admin -> "admin";
            case content -> "content";
            case container -> "container";
            case combined -> "combined";
            case unknown -> throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
        };
    }

    private static String valueOf(NodeResources.DiskSpeed
diskSpeed) { return switch (diskSpeed) { case fast : yield "fast"; case slow : yield "slow"; case any : yield "any"; }; } private static String valueOf(NodeResources.StorageType storageType) { return switch (storageType) { case remote : yield "remote"; case local : yield "local"; case any : yield "any"; }; } private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { try (logStream) { logStream.transferTo(outputStream); } } @Override public long maxPendingBytes() { return 1 << 26; } }; } private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant())); } private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); Instant now = controller.clock().instant(); SupportAccess allowed = 
controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now)); } private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName()); controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant())); } private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment); return buildResponseFromProtonMetrics(protonMetrics); } private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { var from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.EPOCH); var until = Optional.ofNullable(request.getProperty("until")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.now(controller.clock())); var application = ApplicationId.from(tenantName, applicationName, instanceName); var zone = 
requireZone(environment, region); var deployment = new DeploymentId(application, zone); var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment); var slime = new Slime(); var root = slime.setObject(); for (var entry : events.entrySet()) { var serviceRoot = root.setArray(entry.getKey().clusterId().value()); scalingEventsToSlime(entry.getValue(), serviceRoot); } return new SlimeJsonResponse(slime); } private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) { try { var jsonObject = jsonMapper.createObjectNode(); var jsonArray = jsonMapper.createArrayNode(); for (ProtonMetrics metrics : protonMetrics) { jsonArray.add(metrics.toJson()); } jsonObject.set("metrics", jsonArray); return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject)); } catch (JsonProcessingException e) { log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e); return new JsonResponse(500, ""); } } private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { ZoneRegistry zones = controller.zoneRegistry(); type = switch (type.environment()) { case test -> JobType.systemTest(zones, zones.systemZone().getCloudName()); case staging -> JobType.stagingTest(zones, zones.systemZone().getCloudName()); default -> type; }; Inspector requestObject = toSlime(request.getData()).get(); boolean requireTests = ! requestObject.field("skipTests").asBool(); boolean reTrigger = requestObject.field("reTrigger").asBool(); boolean upgradeRevision = ! requestObject.field("skipRevision").asBool(); boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool(); String triggered = reTrigger ? 
controller.applications().deploymentTrigger() .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName() : controller.applications().deploymentTrigger() .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform) .stream().map(job -> job.type().jobName()).collect(joining(", ")); String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") + (upgradeRevision ? "" : "revision") + ( ! upgradeRevision && ! upgradePlatform ? " and " : "") + (upgradePlatform ? "" : "platform") + ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : ""); return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id + suppressedUpgrades); } private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); } private HttpResponse resume(ApplicationId id, JobType type) { controller.applications().deploymentTrigger().resumeJob(id, type); return new MessageResponse(type.jobName() + " for " + id + " resumed"); } private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("tenant", application.id().tenant().value()); object.setString("application", application.id().application().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString()); DeploymentStatus status = controller.jobController().deploymentStatus(application); application.revisions().last().ifPresent(version -> 
JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version)); application.projectId().ifPresent(id -> object.setLong("projectId", id)); application.instances().values().stream().findFirst().ifPresent(instance -> { if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); }); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor instancesArray = object.setArray("instances"); for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values() : application.instances().values()) toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); 
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) { object.setString("instance", instance.name().value()); if (deploymentSpec.instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), status.application()); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application()); Cursor changeBlockers = object.setArray("changeBlockers"); deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } addRotationId(object, instance); List<Deployment> deployments = deploymentSpec.instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor deploymentsArray = object.setArray("deployments"); for (Deployment deployment : deployments) { Cursor deploymentObject = deploymentsArray.addObject(); if (deployment.zone().environment() == Environment.prod && ! 
instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } } private void addRotationId(Cursor object, Instance instance) { instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); } private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) { Application application = status.application(); object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.revisions().last().ifPresent(version -> { version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); }); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if (application.deploymentSpec().instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! 
instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); addRotationId(object, instance); List<Deployment> deployments = application.deploymentSpec().instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); if (deployment.zone().environment() == Environment.prod) { if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); } if ( ! recurseOverDeployments(request) && ! 
instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); } } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } Stream.concat(status.jobSteps().keySet().stream() .filter(job -> job.application().instance().equals(instance.name())) .filter(job -> job.type().isProduction() && job.type().isDeployment()), controller.jobController().active(instance.id()).stream() .map(run -> run.id().job()) .filter(job -> job.type().environment().isManuallyDeployed())) .map(job -> job.type().zone()) .filter(zone -> ! 
instance.deployments().containsKey(zone)) .forEach(zone -> { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", zone.environment().value()); deploymentObject.setString("region", zone.region().value()); }); application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key))); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region)); 
// (continuation of deployment(): resolve the Deployment for the zone, 404 if absent.)
// Then three helpers follow on this line:
//  - toSlime(Cursor, Change, Application): serializes an in-flight change
//    ("version" for platform, "revision" object for application revision).
//  - toSlime(Endpoint, Cursor): serializes one endpoint (cluster, tls, url, scope,
//    routing method, legacy flag).
//  - toSlime(Cursor, DeploymentId, Deployment, HttpRequest): the large per-deployment
//    serializer; this line covers identity fields and the "endpoints" array — zone-scoped
//    endpoints first, with legacy/direct ones excluded unless ?includeLegacyEndpoints.
Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Change change, Application application) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision))); } private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); object.setBool("legacy", endpoint.legacy()); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())); boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints"); var endpointArray = response.setArray("endpoints"); EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId) .scope(Endpoint.Scope.zone); if (!legacyEndpoints) { zoneEndpoints = zoneEndpoints.not().legacy().direct(); } for (var 
// (continuation of the per-deployment toSlime: declared endpoints targeting this
// deployment, links to clusters/nodes/monitoring, version/revision/build info,
// deploy time + optional expiry from the zone's deployment TTL, rotation status for
// prod zones, and a job-derived "status" of complete/pending/running — manually
// deployed environments instead report the last run's state.)
endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); } EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId); if (!legacyEndpoints) { declaredEndpoints = declaredEndpoints.not().legacy().direct(); } for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); } response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString()); response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", application.revisions().get(deployment.revision()).stringId()); response.setLong("build", deployment.revision().number()); Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment); response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli())); application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); var instance = application.instances().get(deploymentId.applicationId().instance()); if (instance != null) { if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response); if (!deployment.zone().environment().isManuallyDeployed()) { DeploymentStatus status = 
// (job-step lookup continues; "pending" is chosen when the step's readyAt is in the
// future or unknown — note orElse(true) means missing readiness info reads as pending.)
controller.jobController().deploymentStatus(application); JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone())); Optional.ofNullable(status.jobSteps().get(jobId)) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision())); if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true)) response.setString("status", "pending"); else response.setString("status", "running"); }); } else { var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId())); deploymentRun.ifPresent(run -> { response.setString("status", run.hasEnded() ? "complete" : "running"); }); } } response.setDouble("quota", deployment.quota().rate()); deployment.cost().ifPresent(cost -> response.setDouble("cost", cost)); controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false) .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString())); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); 
// Remaining deployment metrics close the big toSlime. Then:
//  - toSlime(RotationState, Cursor): "bcpStatus" object with a rotation-status string.
//  - toSlime(List<AssignedRotation>, ...): "endpointStatus" array, one entry per rotation.
//  - monitoringSystemUri: delegates to the zone registry.
//  - setGlobalRotationOverride: flips routing status in/out of service; the agent is
//    "operator" for operator requests, "tenant" otherwise (ternary continues on next line).
metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(RotationState state, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", rotationStateString(state)); } private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) { var array = object.setArray("endpointStatus"); for (var rotation : rotations) { var statusObject = array.addObject(); var targets = status.of(rotation.rotationId()); statusObject.setString("endpointId", rotation.endpointId().id()); statusObject.setString("rotationId", rotation.rotationId().asString()); statusObject.setString("clusterId", rotation.clusterId().value()); statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment))); statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli()); } } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = requireZone(environment, region); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } DeploymentId deploymentId = new DeploymentId(instance.id(), zone); RoutingStatus.Agent agent = isOperator(request) ? 
// (end of setGlobalRotationOverride: apply the in/out routing status and respond.)
// getGlobalRotationOverride: reports the current routing status for the primary
// rotation-backed endpoint, if any, under "globalrotationoverride" (legacy array shape:
// upstream name string followed by a status object).
// rotationStatus (starting below): resolves the rotation by optional endpointId and
// 404s when the zone has no deployment.
RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant; RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out; controller.routing().of(deploymentId).setRoutingStatus(status, agent); return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ? "in" : "out of")); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId()) .requiresRotation() .primary(); if (primaryEndpoint.isPresent()) { DeploymentRoutingContext context = controller.routing().of(deploymentId); RoutingStatus status = context.routingStatus(); array.addString(primaryEndpoint.get().upstreamName(deploymentId)); Cursor statusObject = array.addObject(); statusObject.setString("status", status.value().name()); statusObject.setString("reason", ""); statusObject.setString("agent", status.agent().name()); statusObject.setLong("timestamp", status.changedAt().getEpochSecond()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().requireInstance(applicationId); ZoneId zone = requireZone(environment, region); RotationId rotation = findRotationId(instance, endpointId); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + 
// (rotationStatus ends.) Then:
//  - deploying: serializes the in-flight change (platform/application/pinned); empty
//    object when there is no change.
//  - suspended: boolean suspension state of a deployment.
//  - status: proxies a service's /status page via the config server.
//  - orchestrator (signature continues on the next line): proxies service node info.
" has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(instance.rotationStatus().of(rotation, deployment), response); return new SlimeJsonResponse(slime); } private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); Slime slime = new Slime(); Cursor root = slime.setObject(); if ( ! instance.change().isEmpty()) { instance.change().platform().ifPresent(version -> root.setString("platform", version.toString())); instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString())); root.setBool("pinned", instance.change().isPinned()); } return new SlimeJsonResponse(slime); } private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); } private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters())); } private HttpResponse orchestrator(String tenantName, 
// (orchestrator ends.) Then:
//  - stateV1: proxies a service's /state/v1 page, forwarding query params and adding
//    "forwarded-url" with the original request URL (query stripped).
//  - content: proxies application-package content from the config server.
//  - updateTenant: updates a tenant via access-control specification + credentials,
//    then returns the refreshed tenant serialization.
//  - createTenant (starts below): creates the tenant; in public systems it also seeds
//    tenant contact info from the authenticated user.
String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodes(deploymentId); } private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Query query = Query.empty().add(request.getJDiscRequest().parameters()); query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString()); return controller.serviceRegistry().configServer().getServiceNodePage( deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query); } private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri()); } private HttpResponse updateTenant(String tenantName, HttpRequest request) { getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createTenant(String tenantName, HttpRequest 
// (createTenant body.) Then createApplication and createInstance; createInstance
// lazily creates the enclosing application first when it does not exist yet.
// The trailing "/**" fragment opens the Javadoc for deployPlatform, continued on the
// next source line.
request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); if (controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), user.email())); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); if (controller.applications().getApplication(applicationId).isEmpty()) createApplication(tenantName, applicationName, request); controller.applications().createInstance(applicationId.instance(instanceName)); Slime slime = new Slime(); toSlime(applicationId.instance(instanceName), slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the given Vespa version if a valid 
one is given, e.g., "7.8.9". */ private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) { String versionString = readToString(request.getData()); ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Version version = Version.fromString(versionString); VersionStatus versionStatus = controller.readVersionStatus(); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(versionStatus); if (!versionStatus.isActive(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + versionStatus.versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPin(); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. */ private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Inspector buildField = toSlime(request.getData()).get().field("build"); long build = buildField.valid() ? buildField.asLong() : -1; StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { RevisionId revision = build == -1 ? 
// (deployApplication: no "build" field (-1) means latest known revision; otherwise a
// specific build is resolved via getRevision, which also verifies that the build's
// package still exists in the application store before accepting it.)
//  - cancelBuild: marks a production revision as skipped/non-deployable.
//  - cancelDeploy (starts below): cancels the current change; the "choice" string is
//    upper-cased into a ChangesToCancel enum value.
application.get().revisions().last().get().id() : getRevision(application.get(), build); Change change = Change.of(revision); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } private RevisionId getRevision(Application application, long build) { return application.revisions().withPackage().stream() .map(ApplicationVersion::id) .filter(version -> version.number() == build) .findFirst() .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build)) .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found")); } private HttpResponse cancelBuild(String tenantName, String applicationName, String build){ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); RevisionId revision = RevisionId.forProduction(Long.parseLong(build)); controller.applications().lockApplicationOrThrow(id, application -> { controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped()))); }); return new MessageResponse("Marked build '" + build + "' as non-deployable"); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase()); 
// (cancelDeploy ends: reports old change -> new change.)
// reindex: schedules reindexing for a deployment; optional comma-separated clusterId
// and documentType properties are split and blank entries dropped; optional speed and
// indexedOnly flags are passed through. The response message mirrors exactly the
// options that were applied.
// getReindexing (Javadoc split across the source lines below): serializes per-cluster
// pending (type -> requiredGeneration) and ready (type -> status) reindexing state,
// both sorted by key.
controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); }); return new MessageResponse(response.toString()); } /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */ private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .toList(); List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! type.isBlank()) .toList(); Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null; boolean indexedOnly = request.getBooleanProperty("indexedOnly"); controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed); return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) + (indexedOnly ? ", for indexed types" : "") + (speed != null ? ", with speed " + speed : "")); } /** Gets reindexing status of an application in a zone. 
*/ private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setBool("enabled", reindexing.enabled()); Cursor clustersArray = root.setArray("clusters"); reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = readyArray.addObject(); readyObject.setString("type", ready.getKey()); setStatus(readyObject, ready.getValue()); }); }); return new SlimeJsonResponse(slime); } void setStatus(Cursor statusObject, ApplicationReindexing.Status status) { status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli())); status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli())); status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli())); status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state)); status.message().ifPresent(message -> statusObject.setString("message", message)); status.progress().ifPresent(progress 
// (setStatus ends: optional progress and speed fields.)
//  - toString(State): colon-form switch EXPRESSION with yield (valid Java 14+);
//    exhaustive over the enum, so no default branch is needed.
//  - enableReindexing / disableReindexing: toggle reindexing for a deployment.
//  - restart (starts below): schedules a restart, optionally filtered by hostname,
//    cluster type and cluster id taken from request properties.
-> statusObject.setDouble("progress", progress)); status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed)); } private static String toString(ApplicationReindexing.State state) { return switch (state) { case PENDING: yield "pending"; case RUNNING: yield "running"; case FAILED: yield "failed"; case SUCCESSFUL: yield "successful"; }; } /** Enables reindexing of an application in a zone. */ private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().enableReindexing(id, zone); return new MessageResponse("Enabled reindexing of " + id + " in " + zone); } /** Disables reindexing of an application in a zone. */ private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().disableReindexing(id, zone); return new MessageResponse("Disabled reindexing of " + id + " in " + zone); } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); RestartFilter restartFilter = new RestartFilter() .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of)) .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from)) 
// (restart's builder chain ends.)
//  - suspend: toggles orchestration suspension for a deployment.
//  - jobDeploy (starts below): direct deployment through a job; restricted to manually
//    deployed environments unless the caller is an operator. The multipart check
//    continues on the next source line.
.withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); controller.applications().restart(deploymentId, restartFilter); return new MessageResponse("Requested restart of " + deploymentId); } /** Set suspension status of the given deployment. */ private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().setSuspension(deploymentId, suspend); return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId); } private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) { if ( ! type.environment().isManuallyDeployed() && ! isOperator(request)) throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments."); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
// (jobDeploy body.) NOTE(review): the presence check uses the literal "applicationZip"
// while the value is read via EnvironmentResource.APPLICATION_ZIP — presumably the
// constant equals "applicationZip"; verify, otherwise the check and the read disagree.
// Optional "deployOptions" JSON may carry vespaVersion and dryRun. The response notes
// the first deployment may take ~15 minutes.
dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), Optional.of(id.instance()), Optional.of(type.zone()), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); ensureApplicationExists(TenantAndApplicationId.from(id), request); boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("dryRun", options)) .map(Boolean::valueOf) .orElse(false); controller.jobController().deploy(id, type, version, applicationPackage, dryRun); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time."); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
// (deploy: only supported for SYSTEM applications with a package; explicit version is
// rejected, and deployment is refused while a system upgrade is in progress or before
// the system version is known.)
//  - deleteTenant: "forget" requires operator rights.
//  - deleteApplication (signature continues on the next source line).
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } ActivateResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); return new SlimeJsonResponse(toSlime(result)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && !isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id 
// (deleteApplication ends.)
//  - deleteInstance: removes the instance and, when it was the application's last
//    instance, deletes the whole application as well.
//  - deactivate: deactivates the deployment and aborts any still-running deployment
//    job for it, recording who triggered the deactivation.
// The trailing Javadoc fragment opens testConfig (continued on the next source line).
= TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); } private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) { Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); } return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().deactivate(id.applicationId(), id.zoneId()); controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId())) .filter(run -> ! run.hasEnded()) .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName())); return new MessageResponse("Deactivated " + id); } /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. 
*/ private HttpResponse testConfig(ApplicationId id, JobType type) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id)); ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent() ? id : TenantAndApplicationId.from(id).defaultInstance(); HashSet<DeploymentId> deployments = controller.applications() .getInstance(prodInstanceId).stream() .flatMap(instance -> instance.productionDeployments().keySet().stream()) .map(zone -> new DeploymentId(prodInstanceId, zone)) .collect(Collectors.toCollection(HashSet::new)); ApplicationId toTest = type.isProduction() ? prodInstanceId : id; if ( ! type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone())); Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone()); if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone()); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, deployment.version(), deployment.revision(), deployment.at(), controller.routing().readTestRunnerEndpointsOf(deployments), controller.applications().reachableContentClustersByZone(deployments))); } private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null); if (report != null) { Cursor cursor = report.get(); boolean force = request.getBooleanProperty("force"); if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) { throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString()); } } Slime requestPayload; try { 
requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes()); } catch (Exception e) { throw new IllegalArgumentException("Missing or invalid JSON in request content", e); } Cursor requestPayloadCursor = requestPayload.get(); String configId = requestPayloadCursor.field("configId").asString(); long expiresAt = requestPayloadCursor.field("expiresAt").asLong(); if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); int artifactEntries = artifactsCursor.entries(); if (artifactEntries == 0) { throw new IllegalArgumentException("Missing or empty 'artifacts'"); } Slime dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); } var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); } private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId 
zone = requireZone(environment, region);
        Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
                .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
        return new SlimeJsonResponse(report);
    }

    /**
     * Polls the node's "serviceDump" report until it has a completedAt or failedAt timestamp.
     * NOTE(review): loops without an upper bound — relies on the handler's request timeout; also
     * assumes the report exists (Optional.get()) since the caller just wrote it — confirm.
     */
    private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                                  String application, String instance, String hostname) {
        int pollInterval = 2;
        Slime report;
        while (true) {
            report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
            Cursor cursor = report.get();
            if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
                break;
            }
            final Slime copyForLambda = report;
            log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
            log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
            controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
        }
        return new SlimeJsonResponse(report);
    }

    /** Reads the "serviceDump" report of the given node, verifying that the node is owned by the given application. */
    private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant,
                                      String application, String instance, String hostname) {
        Node node;
        try {
            node = nodeRepository.getNode(zone, hostname);
        } catch (IllegalArgumentException e) {
            throw new NotExistsException(hostname);
        }
        ApplicationId app = ApplicationId.from(tenant, application, instance);
        ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
        if (!app.equals(owner)) {
            throw new IllegalArgumentException("Node is not owned by " + app.toFullString());
        }
        String json = node.reports().get("serviceDump");
        if (json == null) return Optional.empty();
        return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
    }

    private static SourceRevision toSourceRevision(Inspector object) {
        if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) {
            throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
        }
return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); 
toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role)); toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, 
controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) { utilizationObject.setDouble("cpu", utilization.cpu()); utilizationObject.setDouble("idealCpu", utilization.idealCpu()); utilizationObject.setDouble("currentCpu", utilization.currentCpu()); utilizationObject.setDouble("peakCpu", utilization.peakCpu()); utilizationObject.setDouble("memory", utilization.memory()); utilizationObject.setDouble("idealMemory", utilization.idealMemory()); utilizationObject.setDouble("currentMemory", utilization.currentMemory()); utilizationObject.setDouble("peakMemory", utilization.peakMemory()); utilizationObject.setDouble("disk", utilization.disk()); utilizationObject.setDouble("idealDisk", utilization.idealDisk()); utilizationObject.setDouble("currentDisk", utilization.currentDisk()); utilizationObject.setDouble("peakDisk", utilization.peakDisk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor 
object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> 
object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli()));
        tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
              .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli()));
        tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer)
              .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli()));
        tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator)
              .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli()));
    }

    /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/
    private URI withPathAndQuery(String newPath, String newQuery, URI uri) {
        try {
            return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null);
        } catch (URISyntaxException e) {
            throw new RuntimeException("Will not happen", e);
        }
    }

    /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
    private URI withPath(String newPath, URI uri) {
        return withPathAndQuery(newPath, null, uri);
    }

    /** Returns the application/v4 path for the given deployment. */
    private String toPath(DeploymentId id) {
        return path("/application", "v4",
                    "tenant", id.applicationId().tenant(),
                    "application", id.applicationId().application(),
                    "instance", id.applicationId().instance(),
                    "environment", id.zoneId().environment(),
                    "region", id.zoneId().region());
    }

    /** Parses the given value as a long, returning the default when null; throws on non-numeric input. */
    private long asLong(String valueOrNull, long defaultWhenNull) {
        if (valueOrNull == null) return defaultWhenNull;
        try {
            return Long.parseLong(valueOrNull);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
        }
    }

    /** Reads up to 1 MB of JSON from the given stream into a Slime tree. */
    private Slime toSlime(InputStream jsonStream) {
        try {
            byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
            return SlimeUtils.jsonToSlime(jsonBytes);
        } catch (IOException e) {
            // Fix: the original threw a bare RuntimeException(), discarding both cause and message
            // and making read failures impossible to diagnose.
            throw new RuntimeException(e);
        }
    }

    private static Principal
requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new RestApiException.InternalServerError("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", 
logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = 
object.setArray("secretStores"); tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), 
path.get("application"), path.get("instance")); } private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant), Optional.empty(), Optional.empty(), 
applicationPackage, Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx, controller.auditLogger()); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if 
(path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
        // Application-level resources.
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), request.getProperty("allowMajor"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        // Instance-level resources.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        // Job and run resources.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
        // Deployment (zone-level) resources, instance-first path form.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Legacy environment-first path forms, kept for backwards compatibility.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // NOTE(review): the check below duplicates the one directly above (same path, same
        // handler) and can never fire; it is dead code and could be removed.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PUT requests to the matching resource handler, or returns 404. */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        // Operator access management.
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
        // Tenant info.
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
        // Archive access and secret stores.
        if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        // Global rotation overrides (both path forms).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes POST requests to the matching resource handler, or returns 404. */
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit"))
return submit(path.get("tenant"), path.get("application"), request);
        // Instance-level actions.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        // Job triggering and pausing.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        // Deployment (zone-level) actions.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"),
path.get("environment"), path.get("region"), path.get("node"), request);
        // Legacy environment-first path forms.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PATCH requests (application metadata updates), or returns 404. */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes DELETE requests to the matching resource handler, or returns 404. */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
        // Application-level deletions.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
        // Instance-level deletions.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        // Deployment (zone-level) deletions and toggles.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Responds to OPTIONS with the HTTP methods this handler supports. */
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /** Renders all tenants, each with its applications, as one recursive response. */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        List<Application> applications = controller.applications().asList();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            toSlime(tenantArray.addObject(), tenant,
                    applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).collect(toList()),
                    request);
        return new SlimeJsonResponse(slime);
    }

    /** Root of the API: recursive tenant listing if requested, otherwise a link response. */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request) ? recursiveRoot(request) : new ResourceResponse(request, "tenant");
    }

    /** Lists all tenants (optionally including deleted ones). */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Renders a single tenant by name, or 404 if it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                         .map(tenant -> tenant(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Renders the given tenant together with its applications. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
        return new SlimeJsonResponse(slime);
    }

    /** Renders the operator-access request state (pending request and audit log) for a cloud tenant. */
    private HttpResponse accessRequests(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var accessControlService = controller.serviceRegistry().accessControlService();
        var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
        var managedAccess = accessControlService.getManagedAccess(tenant);
        var slime = new Slime();
        var cursor = slime.setObject();
        cursor.setBool("managedAccess", managedAccess);
        accessRoleInformation.getPendingRequest()
                             .ifPresent(membershipRequest -> {
                                 var requestCursor = cursor.setObject("pendingRequest");
                                 requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                                 requestCursor.setString("reason", membershipRequest.getReason());
                             });
        var auditLogCursor = cursor.setArray("auditLog");
        accessRoleInformation.getAuditLog()
                             .forEach(auditLogEntry -> {
                                 var entryCursor = auditLogCursor.addObject();
                                 entryCursor.setString("created",
auditLogEntry.getCreationTime()); entryCursor.setString("approver", auditLogEntry.getApprover()); entryCursor.setString("reason", auditLogEntry.getReason()); entryCursor.setString("status", auditLogEntry.getAction()); }); return new SlimeJsonResponse(slime); } private HttpResponse requestSshAccess(String tenantName, HttpRequest request) { if (!isOperator(request)) { return ErrorResponse.forbidden("Only operators are allowed to request ssh access"); } if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only request access for cloud tenants"); controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName)); return new MessageResponse("OK"); } private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var inspector = toSlime(request.getData()).get(); var expiry = inspector.field("expiry").valid() ? 
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); } private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); } private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); } private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privel for cloud tenants"); controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { 
infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(getString(inspector.field("contact").field("email"), info.contact().email())); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("name"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) 
.withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() .withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() 
.withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } } private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); } private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); } private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); 
contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); } private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; } private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; } private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private String getString(Inspector field, String defaultVale) { return field.valid() ? 
/** Merges the posted JSON over the tenant's existing info, validates the result, and stores it. */
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
    TenantInfo oldInfo = tenant.info();
    Inspector insp = toSlime(request.getData()).get();
    // Fields absent from the request keep their old values (see getString).
    TenantContact mergedContact = TenantContact.empty()
            .withName(getString(insp.field("contactName"), oldInfo.contact().name()))
            .withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
    TenantInfo mergedInfo = TenantInfo.empty()
            .withName(getString(insp.field("name"), oldInfo.name()))
            .withEmail(getString(insp.field("email"), oldInfo.email()))
            .withWebsite(getString(insp.field("website"), oldInfo.website()))
            .withContact(mergedContact)
            .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
            .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
            .withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
    // Rejects blank contact name/email, non-address emails, and malformed website URLs.
    validateMergedTenantInfo(mergedInfo);
    controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
        lockedTenant = lockedTenant.withInfo(mergedInfo);
        controller.tenants().store(lockedTenant);
    });
    return new MessageResponse("Tenant info updated");
}
IllegalArgumentException("All address fields must be set"); } private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) { if (!insp.valid()) return oldContact; String email = getString(insp.field("email"), oldContact.email()); if (!email.isBlank() && !email.contains("@")) { throw new IllegalArgumentException("'email' needs to be an email address"); } return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(getString(insp.field("email"), oldContact.email())) .withPhone(getString(insp.field("phone"), oldContact.phone())); } private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, oldContact.contact())) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); } private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); if (!email.contains("@")) { throw new IllegalArgumentException("'email' needs to be an email address"); } return new TenantContacts.EmailContact(audiences, email); }).toList(); return new TenantContacts(contacts); } private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) 
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream()) .filter(notification -> propertyEquals(request, "application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); } private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); } private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); 
notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); } private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; } private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; } private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ? 
controller.applications().asList(tenant) : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get())) .map(List::of) .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist")); Slime slime = new Slime(); Cursor applicationArray = slime.setArray(); for (Application application : applications) { Cursor applicationObject = applicationArray.addObject(); applicationObject.setString("tenant", application.id().tenant().value()); applicationObject.setString("application", application.id().application().value()); applicationObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); Cursor instanceArray = applicationObject.setArray("instances"); for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet() : application.instances().keySet()) { Cursor instanceObject = instanceArray.addObject(); instanceObject.setString("instance", instance.value()); instanceObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + instance.value(), request.getUri()).toString()); } } return new SlimeJsonResponse(slime); } private HttpResponse devApplicationPackage(ApplicationId id, JobType type) { ZoneId zone = type.zone(); RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision(); byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision); return new ZipResponse(id.toFullString() + "." 
+ zone.value() + ".zip", applicationPackage); } private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); } private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? 
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); } private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); } private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new 
Slime(); toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request); return new SlimeJsonResponse(slime); } private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); Principal user = request.getJDiscRequest().getUserPrincipal(); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withDeveloperKey(developerKey, user); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) { var awsRegion = request.getProperty("aws-region"); var parameterName = request.getProperty("parameter-name"); var applicationId = ApplicationId.fromFullString(request.getProperty("application-id")); var zoneId = requireZone(ZoneId.from(request.getProperty("zone"))); var deploymentId = new DeploymentId(applicationId, zoneId); var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class); var tenantSecretStore = tenant.tenantSecretStores() .stream() .filter(secretStore -> secretStore.getName().equals(secretStoreName)) .findFirst(); if (tenantSecretStore.isEmpty()) return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'"); var response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, 
parameterName); try { var responseRoot = new Slime(); var responseCursor = responseRoot.setObject(); responseCursor.setString("target", deploymentId.toString()); var responseResultCursor = responseCursor.setObject("result"); var responseSlime = SlimeUtils.jsonToSlime(response); SlimeUtils.copyObject(responseSlime.get(), responseResultCursor); return new SlimeJsonResponse(responseRoot); } catch (JsonParseException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withoutDeveloperKey(developerKey); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); } private void toSlime(Cursor keysArray, Map<PublicKey, ? 
extends Principal> keys) { keys.forEach((key, principal) -> { Cursor keyObject = keysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", principal.getName()); }); } private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withoutDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); } private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var awsId = mandatory("awsId", data).asString(); var externalId = mandatory("externalId", data).asString(); var role = mandatory("role", 
data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = 
lockedTenant.withoutSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); } private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var role = mandatory("role", data).asString(); if (role.isBlank()) { return ErrorResponse.badRequest("AWS archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role)); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + "."); } private HttpResponse removeAwsArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole()); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role removed for tenant " + tenantName + "."); } private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + 
tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var member = mandatory("member", data).asString(); if (member.isBlank()) { return ErrorResponse.badRequest("GCP archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member)); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + "."); } private HttpResponse removeGcpArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember()); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member removed for tenant " + tenantName + "."); } private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes."); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { Inspector majorVersionField = requestObject.field("majorVersion"); if (majorVersionField.valid()) { Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); application = application.withMajorVersion(majorVersion); messageBuilder.add("Set major version to " + (majorVersion == null ? 
"empty" : majorVersion)); } Inspector pemDeployKeyField = requestObject.field("pemDeployKey"); if (pemDeployKeyField.valid()) { String pemDeployKey = pemDeployKeyField.asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); application = application.withDeployKey(deployKey); messageBuilder.add("Added deploy key " + pemDeployKey); } controller.applications().store(application); }); return new MessageResponse(messageBuilder.toString()); } private Application getApplication(String tenantName, String applicationName) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); return controller.applications().getApplication(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private Instance getInstance(String tenantName, String applicationName, String instanceName) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); return controller.applications().getInstance(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id)); Slime slime = new Slime(); Cursor nodesArray = slime.setObject().setArray("nodes"); for (Node node : nodes) { Cursor nodeObject = nodesArray.addObject(); nodeObject.setString("hostname", node.hostname().value()); nodeObject.setString("state", valueOf(node.state())); node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value())); nodeObject.setString("orchestration", valueOf(node.serviceState())); nodeObject.setString("version", node.currentVersion().toString()); 
// (continuation of nodes) Remaining per-node fields; "retired" also covers a pending wantToRetire,
// and "restarting"/"rebooting" compare wanted vs. current generations.
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor)); toSlime(node.resources(), nodeObject); nodeObject.setString("clusterId", node.clusterId()); nodeObject.setString("clusterType", valueOf(node.clusterType())); nodeObject.setBool("down", node.down()); nodeObject.setBool("retired", node.retired() || node.wantToRetire()); nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration()); nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration()); nodeObject.setString("group", node.group()); nodeObject.setLong("index", node.index()); } return new SlimeJsonResponse(slime); }
// Serializes per-cluster autoscaling state (min/max/current resources, target, suggestion,
// utilization, scaling events) for a deployment, keyed by cluster id. (Continues on next line:
// "target" is only emitted when it differs from current, ignoring non-numeric fields.)
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id); Slime slime = new Slime(); Cursor clustersObject = slime.setObject().setObject("clusters"); for (Cluster cluster : application.clusters().values()) { Cursor clusterObject = clustersObject.setObject(cluster.id().value()); clusterObject.setString("type", cluster.type().name()); toSlime(cluster.min(), clusterObject.setObject("min")); toSlime(cluster.max(), clusterObject.setObject("max")); toSlime(cluster.current(), clusterObject.setObject("current")); if (cluster.target().isPresent() && !
// (continuation of clusters) Emit "target" only when it differs numerically from current,
// then suggestion, utilization, scaling events and autoscaling status fields.
cluster.target().get().justNumbers().equals(cluster.current().justNumbers())) toSlime(cluster.target().get(), clusterObject.setObject("target")); cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested"))); utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization")); scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents")); clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode()); clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus()); clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis()); clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate()); clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax()); } return new SlimeJsonResponse(slime); }

/** Wire name of a node state; throws for states this API does not expose. */
private static String valueOf(Node.State state) {
    return switch (state) {
        case failed        -> "failed";
        case parked        -> "parked";
        case dirty         -> "dirty";
        case ready         -> "ready";
        case active        -> "active";
        case inactive      -> "inactive";
        case reserved      -> "reserved";
        case provisioned   -> "provisioned";
        case breakfixed    -> "breakfixed";
        case deprovisioned -> "deprovisioned";
        default -> throw new IllegalArgumentException("Unexpected node state '" + state + "'.");
    };
}

/** Wire name of an orchestration state; anything unrecognized maps to "unknown". */
static String valueOf(Node.ServiceState state) {
    return switch (state) {
        case expectedUp      -> "expectedUp";
        case allowedDown     -> "allowedDown";
        case permanentlyDown -> "permanentlyDown";
        case unorchestrated  -> "unorchestrated";
        default              -> "unknown";
    };
}

/** Wire name of a cluster type; 'unknown' is not serializable and is rejected. */
private static String valueOf(Node.ClusterType type) {
    return switch (type) {
        case admin     -> "admin";
        case content   -> "content";
        case container -> "container";
        case combined  -> "combined";
        case unknown   -> throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'.");
    };
}

private static String valueOf(NodeResources.DiskSpeed
diskSpeed) { return switch (diskSpeed) { case fast : yield "fast"; case slow : yield "slow"; case any : yield "any"; }; } private static String valueOf(NodeResources.StorageType storageType) { return switch (storageType) { case remote : yield "remote"; case local : yield "local"; case any : yield "any"; }; } private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { try (logStream) { logStream.transferTo(outputStream); } } @Override public long maxPendingBytes() { return 1 << 26; } }; } private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant())); } private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); Instant now = controller.clock().instant(); SupportAccess allowed = 
controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now)); } private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName()); controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant())); } private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment); return buildResponseFromProtonMetrics(protonMetrics); } private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { var from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.EPOCH); var until = Optional.ofNullable(request.getProperty("until")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.now(controller.clock())); var application = ApplicationId.from(tenantName, applicationName, instanceName); var zone = 
// (continuation of scaling) One JSON array of scaling events per cluster id.
requireZone(environment, region); var deployment = new DeploymentId(application, zone); var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment); var slime = new Slime(); var root = slime.setObject(); for (var entry : events.entrySet()) { var serviceRoot = root.setArray(entry.getKey().clusterId().value()); scalingEventsToSlime(entry.getValue(), serviceRoot); } return new SlimeJsonResponse(slime); }
// Wraps proton metrics in a {"metrics": [...]} JSON body; serialization failure yields a 500
// with an empty body (the error is logged, not propagated).
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) { try { var jsonObject = jsonMapper.createObjectNode(); var jsonArray = jsonMapper.createArrayNode(); for (ProtonMetrics metrics : protonMetrics) { jsonArray.add(metrics.toJson()); } jsonObject.set("metrics", jsonArray); return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject)); } catch (JsonProcessingException e) { log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e); return new JsonResponse(500, ""); } }
// Manually triggers a job. test/staging job types are first normalized to the system's cloud.
// Body flags: skipTests, reTrigger, skipRevision, skipUpgrade (all default to "do the full thing").
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { ZoneRegistry zones = controller.zoneRegistry(); type = switch (type.environment()) { case test -> JobType.systemTest(zones, zones.systemZone().getCloudName()); case staging -> JobType.stagingTest(zones, zones.systemZone().getCloudName()); default -> type; }; Inspector requestObject = toSlime(request.getData()).get(); boolean requireTests = ! requestObject.field("skipTests").asBool(); boolean reTrigger = requestObject.field("reTrigger").asBool(); boolean upgradeRevision = ! requestObject.field("skipRevision").asBool(); boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool(); String triggered = reTrigger ?
controller.applications().deploymentTrigger() .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName() : controller.applications().deploymentTrigger() .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform) .stream().map(job -> job.type().jobName()).collect(joining(", "));
// Builds a ", without revision and platform upgrade"-style suffix; each ternary contributes one
// fragment, so the concatenation order is significant.
String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") + (upgradeRevision ? "" : "revision") + ( ! upgradeRevision && ! upgradePlatform ? " and " : "") + (upgradePlatform ? "" : "platform") + ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : ""); return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id + suppressedUpgrades); }
// Pauses a job for the maximum allowed pause duration.
private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); }
// Resumes a previously paused job.
private HttpResponse resume(ApplicationId id, JobType type) { controller.applications().deploymentTrigger().resumeJob(id, type); return new MessageResponse(type.jobName() + " for " + id + " resumed"); }
// Serializes the application overview (tenant, deployments link, latest version, change status,
// instances, deploy keys, metrics, activity, ownership). Continues on the next line.
private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("tenant", application.id().tenant().value()); object.setString("application", application.id().application().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString()); DeploymentStatus status = controller.jobController().deploymentStatus(application); application.revisions().last().ifPresent(version ->
// (continuation of toSlime(Cursor, Application, HttpRequest)) Change status is taken from the
// first instance only; instances may be filtered to production via the request.
JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version)); application.projectId().ifPresent(id -> object.setLong("projectId", id)); application.instances().values().stream().findFirst().ifPresent(instance -> { if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); }); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor instancesArray = object.setArray("instances"); for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values() : application.instances().values()) toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); }
// Serializes one instance within the application overview: change status, change blockers from
// the deployment spec, rotation id, then each deployment (recursively if requested).
// NOTE(review): the local 'jobStatus' below appears unused in the visible code — candidate for removal.
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) { object.setString("instance", instance.name().value()); if (deploymentSpec.instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), status.application()); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application()); Cursor changeBlockers = object.setArray("changeBlockers"); deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } addRotationId(object, instance); List<Deployment> deployments = deploymentSpec.instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor deploymentsArray = object.setArray("deployments"); for (Deployment deployment : deployments) { Cursor deploymentObject = deploymentsArray.addObject(); if (deployment.zone().environment() == Environment.prod && !
// (continuation) Rotation status is only emitted for prod deployments with rotations assigned;
// without recursion, only environment/region/url are written per deployment.
instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } }
// Writes the instance's first assigned rotation id, if any.
private void addRotationId(Cursor object, Instance instance) { instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); }
// Serializes a single-instance overview (the /instance/... endpoint): identity, links, latest
// revision source info, change status, change blockers, deployments. Continues over several lines.
// NOTE(review): the local 'jobStatus' below appears unused in the visible code — candidate for removal.
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) { Application application = status.application(); object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.revisions().last().ifPresent(version -> { version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); }); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if (application.deploymentSpec().instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( !
instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); addRotationId(object, instance);
// Deployments are sorted per the spec when the instance is declared there.
List<Deployment> deployments = application.deploymentSpec().instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); if (deployment.zone().environment() == Environment.prod) { if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); } if ( ! recurseOverDeployments(request) && !
// (continuation) After real deployments, zones that have a production/manual job but no current
// deployment are appended as environment/region-only entries via the Stream.concat below.
instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); } } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } Stream.concat(status.jobSteps().keySet().stream() .filter(job -> job.application().instance().equals(instance.name())) .filter(job -> job.type().isProduction() && job.type().isDeployment()), controller.jobController().active(instance.id()).stream() .map(run -> run.id().job()) .filter(job -> job.type().environment().isManuallyDeployed())) .map(job -> job.type().zone()) .filter(zone -> !
instance.deployments().containsKey(zone)) .forEach(zone -> { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", zone.environment().value()); deploymentObject.setString("region", zone.region().value()); });
// Legacy single-key field plus the full key array, then metrics, activity and ownership fields.
application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key))); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); }
// GET handler for a single deployment; 404s when the instance has no deployment in the zone.
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
// (continuation of deployment()) Serializes the deployment found in the instance's zone map.
Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); }
// Serializes a pending change: platform version and/or application revision.
private void toSlime(Cursor object, Change change, Application application) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision))); }
// Serializes one endpoint: cluster, tls, url, scope, routing method, legacy flag.
private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); object.setBool("legacy", endpoint.legacy()); }
// Full deployment detail serialization: identity, endpoints (zone-scoped plus declared ones
// targeting this deployment; legacy endpoints filtered out unless includeLegacyEndpoints is set),
// links, versions, timestamps, status, quota/cost, archive URI, activity and metrics.
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())); boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints"); var endpointArray = response.setArray("endpoints"); EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId) .scope(Endpoint.Scope.zone); if (!legacyEndpoints) { zoneEndpoints = zoneEndpoints.not().legacy().direct(); } for (var
endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); } EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId); if (!legacyEndpoints) { declaredEndpoints = declaredEndpoints.not().legacy().direct(); } for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); } response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString()); response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", application.revisions().get(deployment.revision()).stringId()); response.setLong("build", deployment.revision().number());
// Expiry is only emitted for zones with a configured deployment TTL.
Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment); response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli())); application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); var instance = application.instances().get(deploymentId.applicationId().instance()); if (instance != null) { if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response); if (!deployment.zone().environment().isManuallyDeployed()) { DeploymentStatus status =
// (continuation) Job-managed deployments derive "status" from the deployment job's step state
// (complete / pending / running); manually deployed zones use the last run's hasEnded() instead.
controller.jobController().deploymentStatus(application); JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone())); Optional.ofNullable(status.jobSteps().get(jobId)) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision())); if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true)) response.setString("status", "pending"); else response.setString("status", "running"); }); } else { var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId())); deploymentRun.ifPresent(run -> { response.setString("status", run.hasEnded() ? "complete" : "running"); }); } } response.setDouble("quota", deployment.quota().rate()); deployment.cost().ifPresent(cost -> response.setDouble("cost", cost)); controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false) .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString())); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); }
// Serializes a rotation state into a "bcpStatus" object.
private void toSlime(RotationState state, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", rotationStateString(state)); }
// Serializes per-rotation endpoint status for a deployment.
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) { var array = object.setArray("endpointStatus"); for (var rotation : rotations) { var statusObject = array.addObject(); var targets = status.of(rotation.rotationId()); statusObject.setString("endpointId", rotation.endpointId().id()); statusObject.setString("rotationId", rotation.rotationId().asString()); statusObject.setString("clusterId", rotation.clusterId().value()); statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment))); statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli()); } }
// Monitoring dashboard URI for a deployment, from the zone registry.
private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); }
// Sets a deployment's global routing status in or out of service; the acting agent is recorded
// as operator or tenant depending on the caller. (Continues on the next line.)
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = requireZone(environment, region); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } DeploymentId deploymentId = new DeploymentId(instance.id(), zone); RoutingStatus.Agent agent = isOperator(request) ?
// (continuation of setGlobalRotationOverride) Applies the routing status and reports the result.
RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant; RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out; controller.routing().of(deploymentId).setRoutingStatus(status, agent); return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ? "in" : "out of")); }
// Reports the current global rotation override for the deployment's primary rotation endpoint;
// the array is empty when the application has no rotation-requiring endpoint.
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId()) .requiresRotation() .primary(); if (primaryEndpoint.isPresent()) { DeploymentRoutingContext context = controller.routing().of(deploymentId); RoutingStatus status = context.routingStatus(); array.addString(primaryEndpoint.get().upstreamName(deploymentId)); Cursor statusObject = array.addObject(); statusObject.setString("status", status.value().name()); statusObject.setString("reason", ""); statusObject.setString("agent", status.agent().name()); statusObject.setLong("timestamp", status.changedAt().getEpochSecond()); } return new SlimeJsonResponse(slime); }
// Reports rotation status for one deployment; the rotation is resolved from the optional
// endpoint id via findRotationId, and a missing deployment in the zone yields a 404.
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().requireInstance(applicationId); ZoneId zone = requireZone(environment, region); RotationId rotation = findRotationId(instance, endpointId); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance +
" has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(instance.rotationStatus().of(rotation, deployment), response); return new SlimeJsonResponse(slime); }
// Reports the instance's pending change (platform/application/pinned); empty object when none.
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); Slime slime = new Slime(); Cursor root = slime.setObject(); if ( ! instance.change().isEmpty()) { instance.change().platform().ifPresent(version -> root.setString("platform", version.toString())); instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString())); root.setBool("pinned", instance.change().isPinned()); } return new SlimeJsonResponse(slime); }
// Reports whether a deployment is currently suspended by the orchestrator.
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); }
// Proxies a service's /status page from the config server for one node of the deployment.
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters())); }
// Proxies the orchestrator's view of the deployment's service nodes. (Continues on next line.)
private HttpResponse orchestrator(String tenantName,
String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodes(deploymentId); } private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Query query = Query.empty().add(request.getJDiscRequest().parameters()); query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString()); return controller.serviceRegistry().configServer().getServiceNodePage( deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query); } private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri()); } private HttpResponse updateTenant(String tenantName, HttpRequest request) { getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createTenant(String tenantName, HttpRequest 
request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); if (controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), user.email())); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); if (controller.applications().getApplication(applicationId).isEmpty()) createApplication(tenantName, applicationName, request); controller.applications().createInstance(applicationId.instance(instanceName)); Slime slime = new Slime(); toSlime(applicationId.instance(instanceName), slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the given Vespa version if a valid 
one is given, e.g., "7.8.9". */ private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) { String versionString = readToString(request.getData()); ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Version version = Version.fromString(versionString); VersionStatus versionStatus = controller.readVersionStatus(); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(versionStatus); if (!versionStatus.isActive(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + versionStatus.versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPin(); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. */ private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Inspector buildField = toSlime(request.getData()).get().field("build"); long build = buildField.valid() ? buildField.asLong() : -1; StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { RevisionId revision = build == -1 ? 
application.get().revisions().last().get().id() : getRevision(application.get(), build); Change change = Change.of(revision); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } private RevisionId getRevision(Application application, long build) { return application.revisions().withPackage().stream() .map(ApplicationVersion::id) .filter(version -> version.number() == build) .findFirst() .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build)) .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found")); } private HttpResponse cancelBuild(String tenantName, String applicationName, String build){ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); RevisionId revision = RevisionId.forProduction(Long.parseLong(build)); controller.applications().lockApplicationOrThrow(id, application -> { controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped()))); }); return new MessageResponse("Marked build '" + build + "' as non-deployable"); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change change = application.get().require(id.instance()).change(); if (change.isEmpty()) { response.append("No deployment in progress for ").append(id).append(" at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase()); 
controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id); }); return new MessageResponse(response.toString()); } /** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */ private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream() .flatMap(clusters -> Stream.of(clusters.split(","))) .filter(cluster -> ! cluster.isBlank()) .toList(); List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream() .flatMap(types -> Stream.of(types.split(","))) .filter(type -> ! type.isBlank()) .toList(); Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null; boolean indexedOnly = request.getBooleanProperty("indexedOnly"); controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed); return new MessageResponse("Requested reindexing of " + id + " in " + zone + (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) + (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) + (indexedOnly ? ", for indexed types" : "") + (speed != null ? ", with speed " + speed : "")); } /** Gets reindexing status of an application in a zone. 
*/ private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone); Slime slime = new Slime(); Cursor root = slime.setObject(); root.setBool("enabled", reindexing.enabled()); Cursor clustersArray = root.setArray("clusters"); reindexing.clusters().entrySet().stream().sorted(comparingByKey()) .forEach(cluster -> { Cursor clusterObject = clustersArray.addObject(); clusterObject.setString("name", cluster.getKey()); Cursor pendingArray = clusterObject.setArray("pending"); cluster.getValue().pending().entrySet().stream().sorted(comparingByKey()) .forEach(pending -> { Cursor pendingObject = pendingArray.addObject(); pendingObject.setString("type", pending.getKey()); pendingObject.setLong("requiredGeneration", pending.getValue()); }); Cursor readyArray = clusterObject.setArray("ready"); cluster.getValue().ready().entrySet().stream().sorted(comparingByKey()) .forEach(ready -> { Cursor readyObject = readyArray.addObject(); readyObject.setString("type", ready.getKey()); setStatus(readyObject, ready.getValue()); }); }); return new SlimeJsonResponse(slime); } void setStatus(Cursor statusObject, ApplicationReindexing.Status status) { status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli())); status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli())); status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli())); status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state)); status.message().ifPresent(message -> statusObject.setString("message", message)); status.progress().ifPresent(progress 
-> statusObject.setDouble("progress", progress)); status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed)); } private static String toString(ApplicationReindexing.State state) { return switch (state) { case PENDING: yield "pending"; case RUNNING: yield "running"; case FAILED: yield "failed"; case SUCCESSFUL: yield "successful"; }; } /** Enables reindexing of an application in a zone. */ private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().enableReindexing(id, zone); return new MessageResponse("Enabled reindexing of " + id + " in " + zone); } /** Disables reindexing of an application in a zone. */ private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); controller.applications().disableReindexing(id, zone); return new MessageResponse("Disabled reindexing of " + id + " in " + zone); } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); RestartFilter restartFilter = new RestartFilter() .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of)) .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from)) 
.withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from)); controller.applications().restart(deploymentId, restartFilter); return new MessageResponse("Requested restart of " + deploymentId); } /** Set suspension status of the given deployment. */ private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().setSuspension(deploymentId, suspend); return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId); } private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) { if ( ! type.environment().isManuallyDeployed() && ! isOperator(request)) throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments."); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
dataParts.containsKey("applicationZip")) throw new IllegalArgumentException("Missing required form part 'applicationZip'"); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(id.tenant(), Optional.of(id.instance()), Optional.of(type.zone()), applicationPackage, Optional.of(requireUserPrincipal(request))); Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("vespaVersion", options)) .map(Version::fromString); ensureApplicationExists(TenantAndApplicationId.from(id), request); boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions")) .map(json -> SlimeUtils.jsonToSlime(json).get()) .flatMap(options -> optional("dryRun", options)) .map(Boolean::valueOf) .orElse(false); controller.jobController().deploy(id, type, version, applicationPackage, dryRun); RunId runId = controller.jobController().last(id, type).get().id(); Slime slime = new Slime(); Cursor rootObject = slime.setObject(); rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time."); rootObject.setLong("run", runId.number()); return new SlimeJsonResponse(slime); } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); Map<String, byte[]> dataParts = parseDataParts(request); if ( ! 
dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId); if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) { return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API"); } String vespaVersion = deployOptions.field("vespaVersion").asString(); if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) { return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted"); } VersionStatus versionStatus = controller.readVersionStatus(); if (versionStatus.isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = versionStatus.systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } ActivateResult result = controller.applications() .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber()); return new SlimeJsonResponse(toSlime(result)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { boolean forget = request.getBooleanProperty("forget"); if (forget && !isOperator(request)) return ErrorResponse.forbidden("Only operators can forget a tenant"); controller.tenants().delete(TenantName.from(tenantName), Optional.of(accessControlRequests.credentials(TenantName.from(tenantName), toSlime(request.getData()).get(), request.getJDiscRequest())), forget); return new MessageResponse("Deleted tenant " + tenantName); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId id 
= TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); return new MessageResponse("Deleted application " + id); } private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); controller.applications().deleteInstance(id.instance(instanceName)); if (controller.applications().requireApplication(id).instances().isEmpty()) { Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest()); controller.applications().deleteApplication(id, credentials); } return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString()); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); controller.applications().deactivate(id.applicationId(), id.zoneId()); controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId())) .filter(run -> ! run.hasEnded()) .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName())); return new MessageResponse("Deactivated " + id); } /** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. 
*/ private HttpResponse testConfig(ApplicationId id, JobType type) { Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id)); ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent() ? id : TenantAndApplicationId.from(id).defaultInstance(); HashSet<DeploymentId> deployments = controller.applications() .getInstance(prodInstanceId).stream() .flatMap(instance -> instance.productionDeployments().keySet().stream()) .map(zone -> new DeploymentId(prodInstanceId, zone)) .collect(Collectors.toCollection(HashSet::new)); ApplicationId toTest = type.isProduction() ? prodInstanceId : id; if ( ! type.isProduction()) deployments.add(new DeploymentId(toTest, type.zone())); Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone()); if (deployment == null) throw new NotExistsException(toTest + " is not deployed in " + type.zone()); return new SlimeJsonResponse(testConfigSerializer.configSlime(id, type, false, deployment.version(), deployment.revision(), deployment.at(), controller.routing().readTestRunnerEndpointsOf(deployments), controller.applications().reachableContentClustersByZone(deployments))); } private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null); if (report != null) { Cursor cursor = report.get(); boolean force = request.getBooleanProperty("force"); if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) { throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString()); } } Slime requestPayload; try { 
requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes()); } catch (Exception e) { throw new IllegalArgumentException("Missing or invalid JSON in request content", e); } Cursor requestPayloadCursor = requestPayload.get(); String configId = requestPayloadCursor.field("configId").asString(); long expiresAt = requestPayloadCursor.field("expiresAt").asLong(); if (configId.isEmpty()) { throw new IllegalArgumentException("Missing configId"); } Cursor artifactsCursor = requestPayloadCursor.field("artifacts"); int artifactEntries = artifactsCursor.entries(); if (artifactEntries == 0) { throw new IllegalArgumentException("Missing or empty 'artifacts'"); } Slime dumpRequest = new Slime(); Cursor dumpRequestCursor = dumpRequest.setObject(); dumpRequestCursor.setLong("createdMillis", controller.clock().millis()); dumpRequestCursor.setString("configId", configId); Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts"); for (int i = 0; i < artifactEntries; i++) { dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString()); } if (expiresAt > 0) { dumpRequestCursor.setLong("expiresAt", expiresAt); } Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions"); if (dumpOptionsCursor.children() > 0) { SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions")); } var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest)))); nodeRepository.updateReports(zone, hostname, reportsUpdate); boolean wait = request.getBooleanProperty("wait"); if (!wait) return new MessageResponse("Request created"); return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname); } private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) { NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository(); ZoneId 
zone = requireZone(environment, region); Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname) .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname)); return new SlimeJsonResponse(report); } private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { int pollInterval = 2; Slime report; while (true) { report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get(); Cursor cursor = report.get(); if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) { break; } final Slime copyForLambda = report; log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda)))); log.fine("Sleeping " + pollInterval + " seconds before checking report status again"); controller.sleeper().sleep(Duration.ofSeconds(pollInterval)); } return new SlimeJsonResponse(report); } private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) { Node node; try { node = nodeRepository.getNode(zone, hostname); } catch (IllegalArgumentException e) { throw new NotExistsException(hostname); } ApplicationId app = ApplicationId.from(tenant, application, instance); ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner")); if (!app.equals(owner)) { throw new IllegalArgumentException("Node is not owned by " + app.toFullString()); } String json = node.reports().get("serviceDump"); if (json == null) return Optional.empty(); return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json)); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } 
return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); 
toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role)); toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, 
controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) { utilizationObject.setDouble("cpu", utilization.cpu()); utilizationObject.setDouble("idealCpu", utilization.idealCpu()); utilizationObject.setDouble("currentCpu", utilization.currentCpu()); utilizationObject.setDouble("peakCpu", utilization.peakCpu()); utilizationObject.setDouble("memory", utilization.memory()); utilizationObject.setDouble("idealMemory", utilization.idealMemory()); utilizationObject.setDouble("currentMemory", utilization.currentMemory()); utilizationObject.setDouble("peakMemory", utilization.peakMemory()); utilizationObject.setDouble("disk", utilization.disk()); utilizationObject.setDouble("idealDisk", utilization.idealDisk()); utilizationObject.setDouble("currentDisk", utilization.currentDisk()); utilizationObject.setDouble("peakDisk", utilization.peakDisk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor 
object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> 
object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal 
requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } 
Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); 
tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } 
private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant), Optional.empty(), Optional.empty(), applicationPackage, 
Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
Isn't it better to fail here? Or at least log a message that we're falling back to the version set by the parent POM and that it's different from the version decided by the controller.
protected void doExecute() throws IOException { Path output = Paths.get(outputFile).toAbsolutePath(); OptionalInt allowMajor = majorVersion(new File(project.getBasedir(), "src/main/application/deployment.xml").toPath()); allowMajor.ifPresent(major -> getLog().info("Allowing only major version " + major + ".")); Version compileVersion = Version.fromString(controller.compileVersion(id, allowMajor)); if (compileVersion.isAfter(Vtag.currentVersion)) compileVersion = Vtag.currentVersion; getLog().info("Vespa version to compile against is '" + compileVersion.toFullString() + "'."); getLog().info("Writing compile version to '" + output + "'."); Files.createDirectories(output.getParent()); Files.writeString(output, compileVersion.toFullString()); }
compileVersion = Vtag.currentVersion;
protected void doExecute() throws IOException { Path output = Paths.get(outputFile).toAbsolutePath(); OptionalInt allowMajor = majorVersion(new File(project.getBasedir(), "src/main/application/deployment.xml").toPath()); allowMajor.ifPresent(major -> getLog().info("Allowing only major version " + major + ".")); Version compileVersion = Version.fromString(controller.compileVersion(id, allowMajor)); if (compileVersion.isAfter(Vtag.currentVersion)) throw new IllegalStateException("parent version (" + Vtag.currentVersion.toFullString() + ") should be at least as " + "high as the Vespa version to compile against (" + compileVersion.toFullString() + ")"); getLog().info("Vespa version to compile against is '" + compileVersion.toFullString() + "'."); getLog().info("Writing compile version to '" + output + "'."); Files.createDirectories(output.getParent()); Files.writeString(output, compileVersion.toFullString()); }
class CompileVersionMojo extends AbstractVespaMojo { @Parameter(property = "outputFile", defaultValue = "target/vespa.compile.version") private String outputFile; @Override /** Returns the major version declared in given deploymentXml, if any */ static OptionalInt majorVersion(Path deploymentXml) { try { String xml = Files.readString(deploymentXml); Element deploymentTag = XML.getDocument(xml).getDocumentElement(); if (deploymentTag == null) return OptionalInt.empty(); String allowMajor = deploymentTag.getAttribute("major-version"); if (allowMajor.isEmpty()) return OptionalInt.empty(); return OptionalInt.of(parseMajor(allowMajor)); } catch (NoSuchFileException ignored) { return OptionalInt.empty(); } catch (IOException e) { throw new UncheckedIOException(e); } } private static int parseMajor(String s) { try { int major = Integer.parseInt(s); if (major < 1) throw new IllegalArgumentException("Major version must be positive, got " + major); return major; } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + s + "'", e); } } }
class CompileVersionMojo extends AbstractVespaMojo { @Parameter(property = "outputFile", defaultValue = "target/vespa.compile.version") private String outputFile; @Override /** Returns the major version declared in given deploymentXml, if any */ static OptionalInt majorVersion(Path deploymentXml) { try { String xml = Files.readString(deploymentXml); Element deploymentTag = XML.getDocument(xml).getDocumentElement(); if (deploymentTag == null) return OptionalInt.empty(); String allowMajor = deploymentTag.getAttribute("major-version"); if (allowMajor.isEmpty()) return OptionalInt.empty(); return OptionalInt.of(parseMajor(allowMajor)); } catch (NoSuchFileException ignored) { return OptionalInt.empty(); } catch (IOException e) { throw new UncheckedIOException(e); } } private static int parseMajor(String s) { try { int major = Integer.parseInt(s); if (major < 1) throw new IllegalArgumentException("Major version must be positive, got " + major); return major; } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + s + "'", e); } } }
That's what I wonder, yes ... Currently, we fail later, when trying to compile against a version which is newer than the parent. This happens in the package step. We want to keep that check. We could, however, fail earlier (here) with a more specialised error message, pointing out that the parent version isn't the newest one available—the controller only considers compile versions it can find in the designated maven repository for each system.
protected void doExecute() throws IOException { Path output = Paths.get(outputFile).toAbsolutePath(); OptionalInt allowMajor = majorVersion(new File(project.getBasedir(), "src/main/application/deployment.xml").toPath()); allowMajor.ifPresent(major -> getLog().info("Allowing only major version " + major + ".")); Version compileVersion = Version.fromString(controller.compileVersion(id, allowMajor)); if (compileVersion.isAfter(Vtag.currentVersion)) compileVersion = Vtag.currentVersion; getLog().info("Vespa version to compile against is '" + compileVersion.toFullString() + "'."); getLog().info("Writing compile version to '" + output + "'."); Files.createDirectories(output.getParent()); Files.writeString(output, compileVersion.toFullString()); }
compileVersion = Vtag.currentVersion;
protected void doExecute() throws IOException { Path output = Paths.get(outputFile).toAbsolutePath(); OptionalInt allowMajor = majorVersion(new File(project.getBasedir(), "src/main/application/deployment.xml").toPath()); allowMajor.ifPresent(major -> getLog().info("Allowing only major version " + major + ".")); Version compileVersion = Version.fromString(controller.compileVersion(id, allowMajor)); if (compileVersion.isAfter(Vtag.currentVersion)) throw new IllegalStateException("parent version (" + Vtag.currentVersion.toFullString() + ") should be at least as " + "high as the Vespa version to compile against (" + compileVersion.toFullString() + ")"); getLog().info("Vespa version to compile against is '" + compileVersion.toFullString() + "'."); getLog().info("Writing compile version to '" + output + "'."); Files.createDirectories(output.getParent()); Files.writeString(output, compileVersion.toFullString()); }
class CompileVersionMojo extends AbstractVespaMojo { @Parameter(property = "outputFile", defaultValue = "target/vespa.compile.version") private String outputFile; @Override /** Returns the major version declared in given deploymentXml, if any */ static OptionalInt majorVersion(Path deploymentXml) { try { String xml = Files.readString(deploymentXml); Element deploymentTag = XML.getDocument(xml).getDocumentElement(); if (deploymentTag == null) return OptionalInt.empty(); String allowMajor = deploymentTag.getAttribute("major-version"); if (allowMajor.isEmpty()) return OptionalInt.empty(); return OptionalInt.of(parseMajor(allowMajor)); } catch (NoSuchFileException ignored) { return OptionalInt.empty(); } catch (IOException e) { throw new UncheckedIOException(e); } } private static int parseMajor(String s) { try { int major = Integer.parseInt(s); if (major < 1) throw new IllegalArgumentException("Major version must be positive, got " + major); return major; } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + s + "'", e); } } }
class CompileVersionMojo extends AbstractVespaMojo { @Parameter(property = "outputFile", defaultValue = "target/vespa.compile.version") private String outputFile; @Override /** Returns the major version declared in given deploymentXml, if any */ static OptionalInt majorVersion(Path deploymentXml) { try { String xml = Files.readString(deploymentXml); Element deploymentTag = XML.getDocument(xml).getDocumentElement(); if (deploymentTag == null) return OptionalInt.empty(); String allowMajor = deploymentTag.getAttribute("major-version"); if (allowMajor.isEmpty()) return OptionalInt.empty(); return OptionalInt.of(parseMajor(allowMajor)); } catch (NoSuchFileException ignored) { return OptionalInt.empty(); } catch (IOException e) { throw new UncheckedIOException(e); } } private static int parseMajor(String s) { try { int major = Integer.parseInt(s); if (major < 1) throw new IllegalArgumentException("Major version must be positive, got " + major); return major; } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + s + "'", e); } } }
> We could, however, fail earlier (here) with a more specialised error message I vote for this. If it's going to fail anyway, earlier is better.
protected void doExecute() throws IOException { Path output = Paths.get(outputFile).toAbsolutePath(); OptionalInt allowMajor = majorVersion(new File(project.getBasedir(), "src/main/application/deployment.xml").toPath()); allowMajor.ifPresent(major -> getLog().info("Allowing only major version " + major + ".")); Version compileVersion = Version.fromString(controller.compileVersion(id, allowMajor)); if (compileVersion.isAfter(Vtag.currentVersion)) compileVersion = Vtag.currentVersion; getLog().info("Vespa version to compile against is '" + compileVersion.toFullString() + "'."); getLog().info("Writing compile version to '" + output + "'."); Files.createDirectories(output.getParent()); Files.writeString(output, compileVersion.toFullString()); }
compileVersion = Vtag.currentVersion;
protected void doExecute() throws IOException { Path output = Paths.get(outputFile).toAbsolutePath(); OptionalInt allowMajor = majorVersion(new File(project.getBasedir(), "src/main/application/deployment.xml").toPath()); allowMajor.ifPresent(major -> getLog().info("Allowing only major version " + major + ".")); Version compileVersion = Version.fromString(controller.compileVersion(id, allowMajor)); if (compileVersion.isAfter(Vtag.currentVersion)) throw new IllegalStateException("parent version (" + Vtag.currentVersion.toFullString() + ") should be at least as " + "high as the Vespa version to compile against (" + compileVersion.toFullString() + ")"); getLog().info("Vespa version to compile against is '" + compileVersion.toFullString() + "'."); getLog().info("Writing compile version to '" + output + "'."); Files.createDirectories(output.getParent()); Files.writeString(output, compileVersion.toFullString()); }
class CompileVersionMojo extends AbstractVespaMojo { @Parameter(property = "outputFile", defaultValue = "target/vespa.compile.version") private String outputFile; @Override /** Returns the major version declared in given deploymentXml, if any */ static OptionalInt majorVersion(Path deploymentXml) { try { String xml = Files.readString(deploymentXml); Element deploymentTag = XML.getDocument(xml).getDocumentElement(); if (deploymentTag == null) return OptionalInt.empty(); String allowMajor = deploymentTag.getAttribute("major-version"); if (allowMajor.isEmpty()) return OptionalInt.empty(); return OptionalInt.of(parseMajor(allowMajor)); } catch (NoSuchFileException ignored) { return OptionalInt.empty(); } catch (IOException e) { throw new UncheckedIOException(e); } } private static int parseMajor(String s) { try { int major = Integer.parseInt(s); if (major < 1) throw new IllegalArgumentException("Major version must be positive, got " + major); return major; } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + s + "'", e); } } }
class CompileVersionMojo extends AbstractVespaMojo { @Parameter(property = "outputFile", defaultValue = "target/vespa.compile.version") private String outputFile; @Override /** Returns the major version declared in given deploymentXml, if any */ static OptionalInt majorVersion(Path deploymentXml) { try { String xml = Files.readString(deploymentXml); Element deploymentTag = XML.getDocument(xml).getDocumentElement(); if (deploymentTag == null) return OptionalInt.empty(); String allowMajor = deploymentTag.getAttribute("major-version"); if (allowMajor.isEmpty()) return OptionalInt.empty(); return OptionalInt.of(parseMajor(allowMajor)); } catch (NoSuchFileException ignored) { return OptionalInt.empty(); } catch (IOException e) { throw new UncheckedIOException(e); } } private static int parseMajor(String s) { try { int major = Integer.parseInt(s); if (major < 1) throw new IllegalArgumentException("Major version must be positive, got " + major); return major; } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + s + "'", e); } } }
Agreed.
protected void doExecute() throws IOException { Path output = Paths.get(outputFile).toAbsolutePath(); OptionalInt allowMajor = majorVersion(new File(project.getBasedir(), "src/main/application/deployment.xml").toPath()); allowMajor.ifPresent(major -> getLog().info("Allowing only major version " + major + ".")); Version compileVersion = Version.fromString(controller.compileVersion(id, allowMajor)); if (compileVersion.isAfter(Vtag.currentVersion)) compileVersion = Vtag.currentVersion; getLog().info("Vespa version to compile against is '" + compileVersion.toFullString() + "'."); getLog().info("Writing compile version to '" + output + "'."); Files.createDirectories(output.getParent()); Files.writeString(output, compileVersion.toFullString()); }
compileVersion = Vtag.currentVersion;
protected void doExecute() throws IOException { Path output = Paths.get(outputFile).toAbsolutePath(); OptionalInt allowMajor = majorVersion(new File(project.getBasedir(), "src/main/application/deployment.xml").toPath()); allowMajor.ifPresent(major -> getLog().info("Allowing only major version " + major + ".")); Version compileVersion = Version.fromString(controller.compileVersion(id, allowMajor)); if (compileVersion.isAfter(Vtag.currentVersion)) throw new IllegalStateException("parent version (" + Vtag.currentVersion.toFullString() + ") should be at least as " + "high as the Vespa version to compile against (" + compileVersion.toFullString() + ")"); getLog().info("Vespa version to compile against is '" + compileVersion.toFullString() + "'."); getLog().info("Writing compile version to '" + output + "'."); Files.createDirectories(output.getParent()); Files.writeString(output, compileVersion.toFullString()); }
class CompileVersionMojo extends AbstractVespaMojo {

    @Parameter(property = "outputFile", defaultValue = "target/vespa.compile.version")
    private String outputFile;

    /**
     * Returns the major version declared in the given deployment.xml, if any.
     * A missing file or a missing/empty {@code major-version} attribute yields empty.
     *
     * @throws IllegalArgumentException if the attribute is present but not a positive integer
     * @throws UncheckedIOException     if the file exists but cannot be read
     */
    static OptionalInt majorVersion(Path deploymentXml) {
        try {
            String xml = Files.readString(deploymentXml);
            Element deploymentTag = XML.getDocument(xml).getDocumentElement();
            if (deploymentTag == null) return OptionalInt.empty();

            String allowMajor = deploymentTag.getAttribute("major-version");
            if (allowMajor.isEmpty()) return OptionalInt.empty();
            return OptionalInt.of(parseMajor(allowMajor));
        } catch (NoSuchFileException ignored) {
            // No deployment.xml: the application does not pin a major version.
            return OptionalInt.empty();
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    /** Parses a major version, rejecting non-numeric and non-positive values with a descriptive error. */
    private static int parseMajor(String s) {
        try {
            int major = Integer.parseInt(s);
            if (major < 1) throw new IllegalArgumentException("Major version must be positive, got " + major);
            return major;
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid major version '" + s + "'", e);
        }
    }

}
```suggestion public Network { Objects.requireNonNull(name); Objects.requireNonNull(ipv4Address); } ```
/**
 * A network used by a container.
 *
 * @param name        name of the network; never null
 * @param ipv4Address IPv4 address on this network; never null
 */
public record Network(String name, String ipv4Address) {

    // Compact canonical constructor: validates without restating the field assignments.
    public Network {
        Objects.requireNonNull(name);
        Objects.requireNonNull(ipv4Address);
    }

}
}
/**
 * A network used by a container; both the name and the IPv4 address are required.
 */
public record Network(String name, String ipv4Address) {

    public Network {
        // requireNonNull returns its argument, so these re-assignments are no-ops beyond the null check.
        name = Objects.requireNonNull(name);
        ipv4Address = Objects.requireNonNull(ipv4Address);
    }

}
class Container extends PartialContainer { private final String hostname; private final ContainerResources resources; private final int conmonPid; private final List<Network> networks; public Container(ContainerId id, ContainerName name, Instant createdAt, State state, String imageId, DockerImage image, Map<String, String> labels, int pid, int conmonPid, String hostname, ContainerResources resources, List<Network> networks, boolean managed) { super(id, name, createdAt, state, imageId, image, labels, pid, managed); this.hostname = Objects.requireNonNull(hostname); this.resources = Objects.requireNonNull(resources); this.conmonPid = conmonPid; this.networks = List.copyOf(Objects.requireNonNull(networks)); } /** The hostname of this, if any */ public String hostname() { return hostname; } /** Resource limits for this*/ public ContainerResources resources() { return resources; } /** Pid of the conmon process for this container */ public int conmonPid() { return conmonPid; } /** The networks used by this */ public List<Network> networks() { return networks; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; if (!super.equals(o)) return false; Container that = (Container) o; return conmonPid == that.conmonPid && hostname.equals(that.hostname) && resources.equals(that.resources) && networks.equals(that.networks); } @Override public int hashCode() { return Objects.hash(super.hashCode(), hostname, resources, conmonPid, networks); } /** The network of a container */ }
class Container extends PartialContainer { private final String hostname; private final ContainerResources resources; private final int conmonPid; private final List<Network> networks; public Container(ContainerId id, ContainerName name, Instant createdAt, State state, String imageId, DockerImage image, Map<String, String> labels, int pid, int conmonPid, String hostname, ContainerResources resources, List<Network> networks, boolean managed) { super(id, name, createdAt, state, imageId, image, labels, pid, managed); this.hostname = Objects.requireNonNull(hostname); this.resources = Objects.requireNonNull(resources); this.conmonPid = conmonPid; this.networks = List.copyOf(Objects.requireNonNull(networks)); } /** The hostname of this, if any */ public String hostname() { return hostname; } /** Resource limits for this*/ public ContainerResources resources() { return resources; } /** Pid of the conmon process for this container */ public int conmonPid() { return conmonPid; } /** The networks used by this */ public List<Network> networks() { return networks; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; if (!super.equals(o)) return false; Container that = (Container) o; return conmonPid == that.conmonPid && hostname.equals(that.hostname) && resources.equals(that.resources) && networks.equals(that.networks); } @Override public int hashCode() { return Objects.hash(super.hashCode(), hostname, resources, conmonPid, networks); } /** The network of a container */ }
great
/**
 * Fallback for reading the path of the binary that generated the given core dump: runs GDB
 * inside the container and parses its "Core was generated by `...'" banner.
 *
 * @throws ConvergenceException if the GDB output does not contain a generator path
 */
String readBinPathFallback(NodeAgentContext context, ContainerPath coredumpPath) {
    String[] command = {GDB_PATH_RHEL8, "-n", "-batch", "-core", coredumpPath.pathInContainer()};
    CommandResult result = container.executeCommandInContainer(context, context.users().root(), command);

    Matcher matcher = CORE_GENERATOR_PATH_PATTERN.matcher(result.getOutput());
    if (! matcher.find()) {
        throw ConvergenceException.ofError(String.format("Failed to extract binary path from GDB, result: %s, command: %s",
                asString(result), Arrays.toString(command)));
    }
    // The banner may include program arguments; only the first token is the binary path.
    return matcher.group("path").split(" ")[0];
}
/** Collects metadata (binary path, backtraces) about core dumps found in a container. */
class CoreCollector {

    private static final Logger logger = Logger.getLogger(CoreCollector.class.getName());

    private static final Pattern JAVA_HEAP_DUMP_PATTERN = Pattern.compile("java_pid.*\\.hprof$");
    private static final Pattern CORE_GENERATOR_PATH_PATTERN = Pattern.compile("(?m)^Core was generated by `(?<path>.*?)'\\.");
    private static final Pattern EXECFN_PATH_PATTERN = Pattern.compile("^.* execfn: '(?<path>.*?)'");
    private static final Pattern FROM_PATH_PATTERN = Pattern.compile("^.* from '(?<path>.*?)'");

    static final String GDB_PATH_RHEL8 = "/opt/rh/gcc-toolset-11/root/bin/gdb";
    static final Map<String, Object> JAVA_HEAP_DUMP_METADATA =
            Map.of("bin_path", "java", "backtrace", List.of("Heap dump, no backtrace available"));

    private final ContainerOperations container;

    public CoreCollector(ContainerOperations container) {
        this.container = container;
    }

    /**
     * Reads the path of the binary that produced the given core dump, primarily by parsing the
     * output of the {@code file} command; falls back to GDB if that fails.
     */
    String readBinPath(NodeAgentContext context, ContainerPath coredumpPath) {
        String[] command = {"file", coredumpPath.pathInContainer()};
        try {
            CommandResult result = container.executeCommandInContainer(context, context.users().root(), command);
            if (result.getExitCode() != 0) {
                throw ConvergenceException.ofError("file command failed with " + asString(result));
            }

            Matcher execfnMatcher = EXECFN_PATH_PATTERN.matcher(result.getOutput());
            if (execfnMatcher.find()) {
                // Only the first token is the binary path; the rest may be arguments.
                return execfnMatcher.group("path").split(" ")[0];
            }
            Matcher fromMatcher = FROM_PATH_PATTERN.matcher(result.getOutput());
            if (fromMatcher.find()) {
                return fromMatcher.group("path").split(" ")[0];
            }
        } catch (RuntimeException e) {
            context.log(logger, Level.WARNING, String.format("Failed getting bin path, command: %s. " +
                    "Trying fallback instead", Arrays.toString(command)), e);
        }
        return readBinPathFallback(context, coredumpPath);
    }

    /** Reads a backtrace from the core dump with GDB, for one thread or for all threads. */
    List<String> readBacktrace(NodeAgentContext context, ContainerPath coredumpPath, String binPath, boolean allThreads) {
        String threads = allThreads ? "thread apply all bt" : "bt";
        String[] command = {GDB_PATH_RHEL8, "-n", "-ex", "set print frame-arguments none",
                            "-ex", threads, "-batch", binPath, coredumpPath.pathInContainer()};

        CommandResult result = container.executeCommandInContainer(context, context.users().root(), command);
        if (result.getExitCode() != 0)
            throw ConvergenceException.ofError("Failed to read backtrace " + asString(result) + ", Command: " + Arrays.toString(command));
        return List.of(result.getOutput().split("\n"));
    }

    /** Reads a Java thread dump from the core dump with jhsdb jstack. */
    List<String> readJstack(NodeAgentContext context, ContainerPath coredumpPath, String binPath) {
        String[] command = {"jhsdb", "jstack", "--exe", binPath, "--core", coredumpPath.pathInContainer()};

        CommandResult result = container.executeCommandInContainer(context, context.users().root(), command);
        if (result.getExitCode() != 0)
            throw ConvergenceException.ofError("Failed to read jstack " + asString(result) + ", Command: " + Arrays.toString(command));
        return List.of(result.getOutput().split("\n"));
    }

    /**
     * Collects metadata about a given core dump
     * @param context context of the NodeAgent that owns the core dump
     * @param coredumpPath path to core dump file inside the container
     * @return map of relevant metadata about the core dump
     */
    Map<String, Object> collect(NodeAgentContext context, ContainerPath coredumpPath) {
        // Java heap dumps carry no native backtrace; return canned metadata immediately.
        if (JAVA_HEAP_DUMP_PATTERN.matcher(coredumpPath.getFileName().toString()).find())
            return JAVA_HEAP_DUMP_METADATA;

        Map<String, Object> data = new HashMap<>();
        try {
            String binPath = readBinPath(context, coredumpPath);
            data.put("bin_path", binPath);
            if (Path.of(binPath).getFileName().toString().equals("java")) {
                data.put("backtrace_all_threads", readJstack(context, coredumpPath, binPath));
            } else {
                data.put("backtrace", readBacktrace(context, coredumpPath, binPath, false));
                data.put("backtrace_all_threads", readBacktrace(context, coredumpPath, binPath, true));
            }
        } catch (ConvergenceException e) {
            context.log(logger, Level.WARNING, "Failed to extract backtrace: " + e.getMessage());
        } catch (RuntimeException e) {
            context.log(logger, Level.WARNING, "Failed to extract backtrace", e);
        }
        // Best-effort: return whatever metadata was gathered before a failure, possibly empty.
        return data;
    }

    private String asString(CommandResult result) {
        return "exit status " + result.getExitCode() + ", output '" + result.getOutput() + "'";
    }

}
Add a TODO for removing this path.
public void writeVersionStatus(VersionStatus status) { curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status))); }
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
public void writeVersionStatus(VersionStatus status) { curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status))); }
class CuratorDb { private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Duration deployLockTimeout = Duration.ofMinutes(30); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1); private static final Path root = Path.fromString("/controller/v1"); private static final Path lockRoot = root.append("locks"); private static final Path tenantRoot = root.append("tenants"); private static final Path applicationRoot = root.append("applications"); private static final Path jobRoot = root.append("jobs"); private static final Path controllerRoot = root.append("controllers"); private static final Path routingPoliciesRoot = root.append("routingPolicies"); private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies"); private static final Path endpointCertificateRoot = root.append("applicationCertificates"); private static final Path archiveBucketsRoot = root.append("archiveBuckets"); private static final Path changeRequestsRoot = root.append("changeRequests"); private static final Path notificationsRoot = root.append("notifications"); private static final Path supportAccessRoot = root.append("supportAccess"); private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer(); private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer); private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer(); private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer(); private final TenantSerializer tenantSerializer = new TenantSerializer(); private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer(); private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer); private final 
OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer); private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer(); private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer); private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer(); private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer(); private final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private final RunSerializer runSerializer = new RunSerializer(); private final RetriggerEntrySerializer retriggerEntrySerializer = new RetriggerEntrySerializer(); private final NotificationsSerializer notificationsSerializer = new NotificationsSerializer(); private final Curator curator; private final Duration tryLockTimeout; private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>(); private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>(); @Inject public CuratorDb(Curator curator, ServiceRegistry services) { this(curator, defaultTryLockTimeout, services.zoneRegistry().system()); } CuratorDb(Curator curator, Duration tryLockTimeout, SystemName system) { this.curator = curator; this.tryLockTimeout = tryLockTimeout; } /** Returns all hostnames configured to be part of this ZooKeeper cluster */ public List<String> cluster() { return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(",")) .filter(hostAndPort -> !hostAndPort.isEmpty()) .map(hostAndPort -> hostAndPort.split(":")[0]) .collect(Collectors.toUnmodifiableList()); } public Mutex lock(TenantName name) { return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2)); } public Mutex lock(TenantAndApplicationId id) { return curator.lock(lockPath(id), 
defaultLockTimeout.multipliedBy(2)); } public Mutex lockForDeployment(ApplicationId id, ZoneId zone) { return curator.lock(lockPath(id, zone), deployLockTimeout); } public Mutex lock(ApplicationId id, JobType type) { return curator.lock(lockPath(id, type), defaultLockTimeout); } public Mutex lock(ApplicationId id, JobType type, Step step) throws TimeoutException { return tryLock(lockPath(id, type, step)); } public Mutex lockRotations() { return curator.lock(lockRoot.append("rotations"), defaultLockTimeout); } public Mutex lockConfidenceOverrides() { return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout); } public Mutex lockMaintenanceJob(String jobName) { try { return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName)); } catch (TimeoutException e) { throw new UncheckedTimeoutException(e); } } public Mutex lockProvisionState(String provisionStateId) { return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Mutex lockOsVersions() { return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout); } public Mutex lockOsVersionStatus() { return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout); } public Mutex lockRoutingPolicies() { return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout); } public Mutex lockAuditLog() { return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout); } public Mutex lockNameServiceQueue() { return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout); } public Mutex lockMeteringRefreshTime() throws TimeoutException { return tryLock(lockRoot.append("meteringRefreshTime")); } public Mutex lockArchiveBuckets(ZoneId zoneId) { return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout); } public Mutex lockChangeRequests() { return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout); } public Mutex lockNotifications(TenantName tenantName) { return 
curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout); } public Mutex lockSupportAccess(DeploymentId deploymentId) { return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout); } public Mutex lockDeploymentRetriggerQueue() { return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout); } /** Try locking with a low timeout, meaning it is OK to fail lock acquisition. * * Useful for maintenance jobs, where there is no point in running the jobs back to back. */ private Mutex tryLock(Path path) throws TimeoutException { try { return curator.lock(path, tryLockTimeout); } catch (UncheckedTimeoutException e) { throw new TimeoutException(e.getMessage()); } } private <T> Optional<T> read(Path path, Function<byte[], T> mapper) { return curator.getData(path).filter(data -> data.length > 0).map(mapper); } private Optional<Slime> readSlime(Path path) { return read(path, SlimeUtils::jsonToSlime); } private static byte[] asJson(Slime slime) { try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } public double readUpgradesPerMinute() { return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125); } public void writeUpgradesPerMinute(double n) { curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); } public VersionStatus readVersionStatus() { return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty); } public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) { curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides))); } public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() { return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime) .orElseGet(Collections::emptyMap); } public 
void writeControllerVersion(HostName hostname, ControllerVersion version) { curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version))); } public ControllerVersion readControllerVersion(HostName hostname) { return readSlime(controllerPath(hostname.value())) .map(controllerVersionSerializer::fromSlime) .orElse(ControllerVersion.CURRENT); } public void writeOsVersionTargets(Set<OsVersionTarget> versions) { curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions))); } public Set<OsVersionTarget> readOsVersionTargets() { return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet); } public void writeOsVersionStatus(OsVersionStatus status) { curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status))); } public OsVersionStatus readOsVersionStatus() { return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty); } public void writeTenant(Tenant tenant) { curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant))); } public Optional<Tenant> readTenant(TenantName name) { return readSlime(tenantPath(name)).map(tenantSerializer::tenantFrom); } public List<Tenant> readTenants() { return readTenantNames().stream() .map(this::readTenant) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } public List<TenantName> readTenantNames() { return curator.getChildren(tenantRoot).stream() .map(TenantName::from) .collect(Collectors.toList()); } public void removeTenant(TenantName name) { curator.delete(tenantPath(name)); } public void writeApplication(Application application) { curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application))); } public Optional<Application> readApplication(TenantAndApplicationId application) { Path path = applicationPath(application); return 
curator.getStat(path) .map(stat -> cachedApplications.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), read(path, bytes -> applicationSerializer.fromSlime(bytes)).get())).getSecond()); } public List<Application> readApplications(boolean canFail) { return readApplications(ignored -> true, canFail); } public List<Application> readApplications(TenantName name) { return readApplications(application -> application.tenant().equals(name), false); } private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) { var applicationIds = readApplicationIds(); var applications = new ArrayList<Application>(applicationIds.size()); for (var id : applicationIds) { if (!applicationFilter.test(id)) continue; try { readApplication(id).ifPresent(applications::add); } catch (Exception e) { if (canFail) { log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " + "manual intervention", e); } else { throw e; } } } return Collections.unmodifiableList(applications); } public List<TenantAndApplicationId> readApplicationIds() { return curator.getChildren(applicationRoot).stream() .map(TenantAndApplicationId::fromSerialized) .sorted() .collect(toUnmodifiableList()); } public void removeApplication(TenantAndApplicationId id) { curator.delete(applicationPath(id)); } public void writeLastRun(Run run) { curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run))); } public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) { Path path = runsPath(id, type); curator.set(path, asJson(runSerializer.toSlime(runs))); } public Optional<Run> readLastRun(ApplicationId id, JobType type) { return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime); } public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) { Path path = runsPath(id, type); return 
curator.getStat(path) .map(stat -> cachedHistoricRuns.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond()) .orElseGet(Collections::emptyNavigableMap); } public void deleteRunData(ApplicationId id, JobType type) { curator.delete(runsPath(id, type)); curator.delete(lastRunPath(id, type)); } public void deleteRunData(ApplicationId id) { curator.delete(jobRoot.append(id.serializedForm())); } public List<ApplicationId> applicationsWithJobs() { return curator.getChildren(jobRoot).stream() .map(ApplicationId::fromSerializedForm) .collect(Collectors.toList()); } public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) { return curator.getData(logPath(id, type, chunkId)); } public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) { curator.set(logPath(id, type, chunkId), log); } public void deleteLog(ApplicationId id, JobType type) { curator.delete(runsPath(id, type).append("logs")); } public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) { return curator.getData(lastLogPath(id, type)) .map(String::new).map(Long::parseLong); } public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) { curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes()); } public LongStream getLogChunkIds(ApplicationId id, JobType type) { return curator.getChildren(runsPath(id, type).append("logs")).stream() .mapToLong(Long::parseLong) .sorted(); } public AuditLog readAuditLog() { return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime) .orElse(AuditLog.empty); } public void writeAuditLog(AuditLog log) { curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log))); } public NameServiceQueue readNameServiceQueue() { return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime) .orElse(NameServiceQueue.EMPTY); } public void 
writeNameServiceQueue(NameServiceQueue queue) { curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue))); } @SuppressWarnings("unused") public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } @SuppressWarnings("unused") public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } @SuppressWarnings("unused") public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) { for (var policy : policies) { if (!policy.id().owner().equals(application)) { throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " + application.toShortString()); } } curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies))); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() { return readRoutingPolicies((instance) -> true); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) { return curator.getChildren(routingPoliciesRoot).stream() .map(ApplicationId::fromSerializedForm) .filter(filter) .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies)); } public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) { return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime)) .orElseGet(List::of); } public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) { curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy))); } public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) { return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data)) .orElseGet(() -> new ZoneRoutingPolicy(zone, 
RoutingStatus.DEFAULT)); } public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) { curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata))); } public void deleteEndpointCertificateMetadata(ApplicationId applicationId) { curator.delete(endpointCertificatePath(applicationId)); } public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) { return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString); } public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() { Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>(); for (String appIdString : curator.getChildren(endpointCertificateRoot)) { ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString); Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId); allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow()); } return allEndpointCertificateMetadata; } public void writeMeteringRefreshTime(long timestamp) { curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes()); } public long readMeteringRefreshTime() { return curator.getData(meteringRefreshPath()) .map(String::new).map(Long::parseLong) .orElse(0L); } public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) { return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString) .orElseGet(Set::of); } public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) { curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets))); } public Optional<VespaChangeRequest> readChangeRequest(String 
changeRequestId) { return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime); } public List<VespaChangeRequest> readChangeRequests() { return curator.getChildren(changeRequestsRoot) .stream() .map(this::readChangeRequest) .flatMap(Optional::stream) .collect(Collectors.toList()); } public void writeChangeRequest(VespaChangeRequest changeRequest) { curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest))); } public void deleteChangeRequest(VespaChangeRequest changeRequest) { curator.delete(changeRequestPath(changeRequest.getId())); } public List<Notification> readNotifications(TenantName tenantName) { return readSlime(notificationsPath(tenantName)) .map(slime -> notificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of); } public List<TenantName> listTenantsWithNotifications() { return curator.getChildren(notificationsRoot).stream() .map(TenantName::from) .collect(Collectors.toUnmodifiableList()); } public void writeNotifications(TenantName tenantName, List<Notification> notifications) { curator.set(notificationsPath(tenantName), asJson(notificationsSerializer.toSlime(notifications))); } public void deleteNotifications(TenantName tenantName) { curator.delete(notificationsPath(tenantName)); } public SupportAccess readSupportAccess(DeploymentId deploymentId) { return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY); } /** Take lock before reading before writing */ public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) { curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess))); } public List<RetriggerEntry> readRetriggerEntries() { return readSlime(deploymentRetriggerPath()).map(retriggerEntrySerializer::fromSlime).orElseGet(List::of); } public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) { 
curator.set(deploymentRetriggerPath(), asJson(retriggerEntrySerializer.toSlime(retriggerEntries))); } private Path lockPath(TenantName tenant) { return lockRoot .append(tenant.value()); } private Path lockPath(TenantAndApplicationId application) { return lockRoot.append(application.tenant().value() + ":" + application.application().value()); } private Path lockPath(ApplicationId instance, ZoneId zone) { return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value()); } private Path lockPath(ApplicationId instance, JobType type) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName()); } private Path lockPath(ApplicationId instance, JobType type, Step step) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name()); } private Path lockPath(String provisionId) { return lockRoot .append(provisionStatePath()) .append(provisionId); } private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private static Path targetMajorVersionPath() { return root.append("upgrader").append("targetMajorVersion"); } private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); } private static Path osVersionTargetsPath() { return root.append("osUpgrader").append("targetVersion"); } private static Path osVersionStatusPath() { return root.append("osVersionStatus"); } private static Path versionStatusPath() { return root.append("versionStatus"); } private static Path routingPolicyPath(ApplicationId application) { return routingPoliciesRoot.append(application.serializedForm()); } private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); } private static Path nameServiceQueuePath() { return root.append("nameServiceQueue"); } private static Path auditLogPath() { return root.append("auditLog"); } private static Path provisionStatePath() { 
return root.append("provisioning").append("states"); } private static Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); } private static Path applicationPath(TenantAndApplicationId id) { return applicationRoot.append(id.serialized()); } private static Path runsPath(ApplicationId id, JobType type) { return jobRoot.append(id.serializedForm()).append(type.jobName()); } private static Path lastRunPath(ApplicationId id, JobType type) { return runsPath(id, type).append("last"); } private static Path logPath(ApplicationId id, JobType type, long first) { return runsPath(id, type).append("logs").append(Long.toString(first)); } private static Path lastLogPath(ApplicationId id, JobType type) { return runsPath(id, type).append("logs"); } private static Path controllerPath(String hostname) { return controllerRoot.append(hostname); } private static Path endpointCertificatePath(ApplicationId id) { return endpointCertificateRoot.append(id.serializedForm()); } private static Path meteringRefreshPath() { return root.append("meteringRefreshTime"); } private static Path archiveBucketsPath(ZoneId zoneId) { return archiveBucketsRoot.append(zoneId.value()); } private static Path changeRequestPath(String id) { return changeRequestsRoot.append(id); } private static Path notificationsPath(TenantName tenantName) { return notificationsRoot.append(tenantName.value()); } private static Path supportAccessPath(DeploymentId deploymentId) { return supportAccessRoot.append(deploymentId.dottedString()); } private static Path deploymentRetriggerPath() { return root.append("deploymentRetriggerQueue"); } }
class CuratorDb { private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Duration deployLockTimeout = Duration.ofMinutes(30); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1); private static final Path root = Path.fromString("/controller/v1"); private static final Path lockRoot = root.append("locks"); private static final Path tenantRoot = root.append("tenants"); private static final Path applicationRoot = root.append("applications"); private static final Path jobRoot = root.append("jobs"); private static final Path controllerRoot = root.append("controllers"); private static final Path routingPoliciesRoot = root.append("routingPolicies"); private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies"); private static final Path endpointCertificateRoot = root.append("applicationCertificates"); private static final Path archiveBucketsRoot = root.append("archiveBuckets"); private static final Path changeRequestsRoot = root.append("changeRequests"); private static final Path notificationsRoot = root.append("notifications"); private static final Path supportAccessRoot = root.append("supportAccess"); private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer(); private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer); private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer(); private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer(); private final TenantSerializer tenantSerializer = new TenantSerializer(); private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer(); private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer); private final 
OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer); private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer(); private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer); private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer(); private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer(); private final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private final RunSerializer runSerializer = new RunSerializer(); private final RetriggerEntrySerializer retriggerEntrySerializer = new RetriggerEntrySerializer(); private final NotificationsSerializer notificationsSerializer = new NotificationsSerializer(); private final Curator curator; private final Duration tryLockTimeout; private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>(); private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>(); @Inject public CuratorDb(Curator curator, ServiceRegistry services) { this(curator, defaultTryLockTimeout, services.zoneRegistry().system()); } CuratorDb(Curator curator, Duration tryLockTimeout, SystemName system) { this.curator = curator; this.tryLockTimeout = tryLockTimeout; } /** Returns all hostnames configured to be part of this ZooKeeper cluster */ public List<String> cluster() { return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(",")) .filter(hostAndPort -> !hostAndPort.isEmpty()) .map(hostAndPort -> hostAndPort.split(":")[0]) .collect(Collectors.toUnmodifiableList()); } public Mutex lock(TenantName name) { return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2)); } public Mutex lock(TenantAndApplicationId id) { return curator.lock(lockPath(id), 
defaultLockTimeout.multipliedBy(2)); } public Mutex lockForDeployment(ApplicationId id, ZoneId zone) { return curator.lock(lockPath(id, zone), deployLockTimeout); } public Mutex lock(ApplicationId id, JobType type) { return curator.lock(lockPath(id, type), defaultLockTimeout); } public Mutex lock(ApplicationId id, JobType type, Step step) throws TimeoutException { return tryLock(lockPath(id, type, step)); } public Mutex lockRotations() { return curator.lock(lockRoot.append("rotations"), defaultLockTimeout); } public Mutex lockConfidenceOverrides() { return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout); } public Mutex lockMaintenanceJob(String jobName) { try { return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName)); } catch (TimeoutException e) { throw new UncheckedTimeoutException(e); } } public Mutex lockProvisionState(String provisionStateId) { return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Mutex lockOsVersions() { return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout); } public Mutex lockOsVersionStatus() { return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout); } public Mutex lockRoutingPolicies() { return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout); } public Mutex lockAuditLog() { return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout); } public Mutex lockNameServiceQueue() { return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout); } public Mutex lockMeteringRefreshTime() throws TimeoutException { return tryLock(lockRoot.append("meteringRefreshTime")); } public Mutex lockArchiveBuckets(ZoneId zoneId) { return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout); } public Mutex lockChangeRequests() { return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout); } public Mutex lockNotifications(TenantName tenantName) { return 
curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout); } public Mutex lockSupportAccess(DeploymentId deploymentId) { return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout); } public Mutex lockDeploymentRetriggerQueue() { return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout); } /** Try locking with a low timeout, meaning it is OK to fail lock acquisition. * * Useful for maintenance jobs, where there is no point in running the jobs back to back. */ private Mutex tryLock(Path path) throws TimeoutException { try { return curator.lock(path, tryLockTimeout); } catch (UncheckedTimeoutException e) { throw new TimeoutException(e.getMessage()); } } private <T> Optional<T> read(Path path, Function<byte[], T> mapper) { return curator.getData(path).filter(data -> data.length > 0).map(mapper); } private Optional<Slime> readSlime(Path path) { return read(path, SlimeUtils::jsonToSlime); } private static byte[] asJson(Slime slime) { try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } public double readUpgradesPerMinute() { return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125); } public void writeUpgradesPerMinute(double n) { curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); } public VersionStatus readVersionStatus() { return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty); } public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) { curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides))); } public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() { return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime) .orElseGet(Collections::emptyMap); } public 
void writeControllerVersion(HostName hostname, ControllerVersion version) { curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version))); } public ControllerVersion readControllerVersion(HostName hostname) { return readSlime(controllerPath(hostname.value())) .map(controllerVersionSerializer::fromSlime) .orElse(ControllerVersion.CURRENT); } public void writeOsVersionTargets(Set<OsVersionTarget> versions) { curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions))); } public Set<OsVersionTarget> readOsVersionTargets() { return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet); } public void writeOsVersionStatus(OsVersionStatus status) { curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status))); } public OsVersionStatus readOsVersionStatus() { return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty); } public void writeTenant(Tenant tenant) { curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant))); } public Optional<Tenant> readTenant(TenantName name) { return readSlime(tenantPath(name)).map(tenantSerializer::tenantFrom); } public List<Tenant> readTenants() { return readTenantNames().stream() .map(this::readTenant) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } public List<TenantName> readTenantNames() { return curator.getChildren(tenantRoot).stream() .map(TenantName::from) .collect(Collectors.toList()); } public void removeTenant(TenantName name) { curator.delete(tenantPath(name)); } public void writeApplication(Application application) { curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application))); } public Optional<Application> readApplication(TenantAndApplicationId application) { Path path = applicationPath(application); return 
curator.getStat(path) .map(stat -> cachedApplications.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), read(path, bytes -> applicationSerializer.fromSlime(bytes)).get())).getSecond()); } public List<Application> readApplications(boolean canFail) { return readApplications(ignored -> true, canFail); } public List<Application> readApplications(TenantName name) { return readApplications(application -> application.tenant().equals(name), false); } private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) { var applicationIds = readApplicationIds(); var applications = new ArrayList<Application>(applicationIds.size()); for (var id : applicationIds) { if (!applicationFilter.test(id)) continue; try { readApplication(id).ifPresent(applications::add); } catch (Exception e) { if (canFail) { log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " + "manual intervention", e); } else { throw e; } } } return Collections.unmodifiableList(applications); } public List<TenantAndApplicationId> readApplicationIds() { return curator.getChildren(applicationRoot).stream() .map(TenantAndApplicationId::fromSerialized) .sorted() .collect(toUnmodifiableList()); } public void removeApplication(TenantAndApplicationId id) { curator.delete(applicationPath(id)); } public void writeLastRun(Run run) { curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run))); } public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) { Path path = runsPath(id, type); curator.set(path, asJson(runSerializer.toSlime(runs))); } public Optional<Run> readLastRun(ApplicationId id, JobType type) { return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime); } public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) { Path path = runsPath(id, type); return 
curator.getStat(path) .map(stat -> cachedHistoricRuns.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond()) .orElseGet(Collections::emptyNavigableMap); } public void deleteRunData(ApplicationId id, JobType type) { curator.delete(runsPath(id, type)); curator.delete(lastRunPath(id, type)); } public void deleteRunData(ApplicationId id) { curator.delete(jobRoot.append(id.serializedForm())); } public List<ApplicationId> applicationsWithJobs() { return curator.getChildren(jobRoot).stream() .map(ApplicationId::fromSerializedForm) .collect(Collectors.toList()); } public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) { return curator.getData(logPath(id, type, chunkId)); } public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) { curator.set(logPath(id, type, chunkId), log); } public void deleteLog(ApplicationId id, JobType type) { curator.delete(runsPath(id, type).append("logs")); } public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) { return curator.getData(lastLogPath(id, type)) .map(String::new).map(Long::parseLong); } public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) { curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes()); } public LongStream getLogChunkIds(ApplicationId id, JobType type) { return curator.getChildren(runsPath(id, type).append("logs")).stream() .mapToLong(Long::parseLong) .sorted(); } public AuditLog readAuditLog() { return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime) .orElse(AuditLog.empty); } public void writeAuditLog(AuditLog log) { curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log))); } public NameServiceQueue readNameServiceQueue() { return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime) .orElse(NameServiceQueue.EMPTY); } public void 
writeNameServiceQueue(NameServiceQueue queue) { curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue))); } @SuppressWarnings("unused") public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } @SuppressWarnings("unused") public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } @SuppressWarnings("unused") public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) { for (var policy : policies) { if (!policy.id().owner().equals(application)) { throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " + application.toShortString()); } } curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies))); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() { return readRoutingPolicies((instance) -> true); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) { return curator.getChildren(routingPoliciesRoot).stream() .map(ApplicationId::fromSerializedForm) .filter(filter) .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies)); } public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) { return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime)) .orElseGet(List::of); } public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) { curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy))); } public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) { return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data)) .orElseGet(() -> new ZoneRoutingPolicy(zone, 
RoutingStatus.DEFAULT)); } public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) { curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata))); } public void deleteEndpointCertificateMetadata(ApplicationId applicationId) { curator.delete(endpointCertificatePath(applicationId)); } public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) { return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString); } public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() { Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>(); for (String appIdString : curator.getChildren(endpointCertificateRoot)) { ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString); Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId); allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow()); } return allEndpointCertificateMetadata; } public void writeMeteringRefreshTime(long timestamp) { curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes()); } public long readMeteringRefreshTime() { return curator.getData(meteringRefreshPath()) .map(String::new).map(Long::parseLong) .orElse(0L); } public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) { return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString) .orElseGet(Set::of); } public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) { curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets))); } public Optional<VespaChangeRequest> readChangeRequest(String 
changeRequestId) { return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime); } public List<VespaChangeRequest> readChangeRequests() { return curator.getChildren(changeRequestsRoot) .stream() .map(this::readChangeRequest) .flatMap(Optional::stream) .collect(Collectors.toList()); } public void writeChangeRequest(VespaChangeRequest changeRequest) { curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest))); } public void deleteChangeRequest(VespaChangeRequest changeRequest) { curator.delete(changeRequestPath(changeRequest.getId())); } public List<Notification> readNotifications(TenantName tenantName) { return readSlime(notificationsPath(tenantName)) .map(slime -> notificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of); } public List<TenantName> listTenantsWithNotifications() { return curator.getChildren(notificationsRoot).stream() .map(TenantName::from) .collect(Collectors.toUnmodifiableList()); } public void writeNotifications(TenantName tenantName, List<Notification> notifications) { curator.set(notificationsPath(tenantName), asJson(notificationsSerializer.toSlime(notifications))); } public void deleteNotifications(TenantName tenantName) { curator.delete(notificationsPath(tenantName)); } public SupportAccess readSupportAccess(DeploymentId deploymentId) { return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY); } /** Take lock before reading before writing */ public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) { curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess))); } public List<RetriggerEntry> readRetriggerEntries() { return readSlime(deploymentRetriggerPath()).map(retriggerEntrySerializer::fromSlime).orElseGet(List::of); } public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) { 
curator.set(deploymentRetriggerPath(), asJson(retriggerEntrySerializer.toSlime(retriggerEntries))); } private Path lockPath(TenantName tenant) { return lockRoot .append(tenant.value()); } private Path lockPath(TenantAndApplicationId application) { return lockRoot.append(application.tenant().value() + ":" + application.application().value()); } private Path lockPath(ApplicationId instance, ZoneId zone) { return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value()); } private Path lockPath(ApplicationId instance, JobType type) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName()); } private Path lockPath(ApplicationId instance, JobType type, Step step) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name()); } private Path lockPath(String provisionId) { return lockRoot .append(provisionStatePath()) .append(provisionId); } private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); } private static Path osVersionTargetsPath() { return root.append("osUpgrader").append("targetVersion"); } private static Path osVersionStatusPath() { return root.append("osVersionStatus"); } private static Path versionStatusPath() { return root.append("versionStatus"); } private static Path routingPolicyPath(ApplicationId application) { return routingPoliciesRoot.append(application.serializedForm()); } private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); } private static Path nameServiceQueuePath() { return root.append("nameServiceQueue"); } private static Path auditLogPath() { return root.append("auditLog"); } private static Path provisionStatePath() { return root.append("provisioning").append("states"); } private static Path provisionStatePath(String provisionId) { 
return provisionStatePath().append(provisionId); } private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); } private static Path applicationPath(TenantAndApplicationId id) { return applicationRoot.append(id.serialized()); } private static Path runsPath(ApplicationId id, JobType type) { return jobRoot.append(id.serializedForm()).append(type.jobName()); } private static Path lastRunPath(ApplicationId id, JobType type) { return runsPath(id, type).append("last"); } private static Path logPath(ApplicationId id, JobType type, long first) { return runsPath(id, type).append("logs").append(Long.toString(first)); } private static Path lastLogPath(ApplicationId id, JobType type) { return runsPath(id, type).append("logs"); } private static Path controllerPath(String hostname) { return controllerRoot.append(hostname); } private static Path endpointCertificatePath(ApplicationId id) { return endpointCertificateRoot.append(id.serializedForm()); } private static Path meteringRefreshPath() { return root.append("meteringRefreshTime"); } private static Path archiveBucketsPath(ZoneId zoneId) { return archiveBucketsRoot.append(zoneId.value()); } private static Path changeRequestPath(String id) { return changeRequestsRoot.append(id); } private static Path notificationsPath(TenantName tenantName) { return notificationsRoot.append(tenantName.value()); } private static Path supportAccessPath(DeploymentId deploymentId) { return supportAccessRoot.append(deploymentId.dottedString()); } private static Path deploymentRetriggerPath() { return root.append("deploymentRetriggerQueue"); } }
Just forgot to remove it, fixed now :)
public void writeVersionStatus(VersionStatus status) { curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status))); }
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
public void writeVersionStatus(VersionStatus status) { curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status))); }
/**
 * CuratorDb is the controller's persistence layer: it stores and retrieves all
 * controller state (tenants, applications, job runs and logs, routing policies,
 * endpoint certificate metadata, notifications, change requests, support
 * access, the deployment-retrigger queue, and version/OS status) as
 * JSON-serialized Slime under the ZooKeeper path /controller/v1, accessed
 * through the Curator wrapper.
 *
 * Layout of this class, in order:
 *  - static lock timeouts and the ZooKeeper path root for each data type;
 *  - one serializer instance per stored data type;
 *  - per-path caches for applications and historic runs, refreshed atomically
 *    via ConcurrentHashMap.compute and invalidated by comparing the cached
 *    ZooKeeper stat version with the node's current version (see
 *    readApplication / readHistoricRuns);
 *  - lock* methods returning Mutex handles; tryLock deliberately uses a short
 *    timeout so maintenance jobs can skip a cycle instead of queueing up;
 *  - read*/write*/delete* accessors, grouped per data type;
 *  - private path-building helpers at the bottom.
 *
 * NOTE(review): the package-private constructor accepts a SystemName that is
 * never stored or used — confirm whether it can be dropped.
 * NOTE(review): lockProvisionState uses a fixed 1-second timeout rather than
 * the injected tryLockTimeout — confirm this asymmetry is intentional.
 */
class CuratorDb { private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Duration deployLockTimeout = Duration.ofMinutes(30); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1); private static final Path root = Path.fromString("/controller/v1"); private static final Path lockRoot = root.append("locks"); private static final Path tenantRoot = root.append("tenants"); private static final Path applicationRoot = root.append("applications"); private static final Path jobRoot = root.append("jobs"); private static final Path controllerRoot = root.append("controllers"); private static final Path routingPoliciesRoot = root.append("routingPolicies"); private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies"); private static final Path endpointCertificateRoot = root.append("applicationCertificates"); private static final Path archiveBucketsRoot = root.append("archiveBuckets"); private static final Path changeRequestsRoot = root.append("changeRequests"); private static final Path notificationsRoot = root.append("notifications"); private static final Path supportAccessRoot = root.append("supportAccess"); private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer(); private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer); private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer(); private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer(); private final TenantSerializer tenantSerializer = new TenantSerializer(); private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer(); private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer); private final 
OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer); private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer(); private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer); private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer(); private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer(); private final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private final RunSerializer runSerializer = new RunSerializer(); private final RetriggerEntrySerializer retriggerEntrySerializer = new RetriggerEntrySerializer(); private final NotificationsSerializer notificationsSerializer = new NotificationsSerializer(); private final Curator curator; private final Duration tryLockTimeout; private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>(); private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>(); @Inject public CuratorDb(Curator curator, ServiceRegistry services) { this(curator, defaultTryLockTimeout, services.zoneRegistry().system()); } CuratorDb(Curator curator, Duration tryLockTimeout, SystemName system) { this.curator = curator; this.tryLockTimeout = tryLockTimeout; } /** Returns all hostnames configured to be part of this ZooKeeper cluster */ public List<String> cluster() { return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(",")) .filter(hostAndPort -> !hostAndPort.isEmpty()) .map(hostAndPort -> hostAndPort.split(":")[0]) .collect(Collectors.toUnmodifiableList()); } public Mutex lock(TenantName name) { return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2)); } public Mutex lock(TenantAndApplicationId id) { return curator.lock(lockPath(id), 
defaultLockTimeout.multipliedBy(2)); } public Mutex lockForDeployment(ApplicationId id, ZoneId zone) { return curator.lock(lockPath(id, zone), deployLockTimeout); } public Mutex lock(ApplicationId id, JobType type) { return curator.lock(lockPath(id, type), defaultLockTimeout); } public Mutex lock(ApplicationId id, JobType type, Step step) throws TimeoutException { return tryLock(lockPath(id, type, step)); } public Mutex lockRotations() { return curator.lock(lockRoot.append("rotations"), defaultLockTimeout); } public Mutex lockConfidenceOverrides() { return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout); } public Mutex lockMaintenanceJob(String jobName) { try { return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName)); } catch (TimeoutException e) { throw new UncheckedTimeoutException(e); } } public Mutex lockProvisionState(String provisionStateId) { return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Mutex lockOsVersions() { return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout); } public Mutex lockOsVersionStatus() { return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout); } public Mutex lockRoutingPolicies() { return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout); } public Mutex lockAuditLog() { return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout); } public Mutex lockNameServiceQueue() { return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout); } public Mutex lockMeteringRefreshTime() throws TimeoutException { return tryLock(lockRoot.append("meteringRefreshTime")); } public Mutex lockArchiveBuckets(ZoneId zoneId) { return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout); } public Mutex lockChangeRequests() { return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout); } public Mutex lockNotifications(TenantName tenantName) { return 
curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout); } public Mutex lockSupportAccess(DeploymentId deploymentId) { return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout); } public Mutex lockDeploymentRetriggerQueue() { return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout); } /** Try locking with a low timeout, meaning it is OK to fail lock acquisition. * * Useful for maintenance jobs, where there is no point in running the jobs back to back. */ private Mutex tryLock(Path path) throws TimeoutException { try { return curator.lock(path, tryLockTimeout); } catch (UncheckedTimeoutException e) { throw new TimeoutException(e.getMessage()); } } private <T> Optional<T> read(Path path, Function<byte[], T> mapper) { return curator.getData(path).filter(data -> data.length > 0).map(mapper); } private Optional<Slime> readSlime(Path path) { return read(path, SlimeUtils::jsonToSlime); } private static byte[] asJson(Slime slime) { try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } public double readUpgradesPerMinute() { return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125); } public void writeUpgradesPerMinute(double n) { curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); } public VersionStatus readVersionStatus() { return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty); } public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) { curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides))); } public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() { return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime) .orElseGet(Collections::emptyMap); } public 
void writeControllerVersion(HostName hostname, ControllerVersion version) { curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version))); } public ControllerVersion readControllerVersion(HostName hostname) { return readSlime(controllerPath(hostname.value())) .map(controllerVersionSerializer::fromSlime) .orElse(ControllerVersion.CURRENT); } public void writeOsVersionTargets(Set<OsVersionTarget> versions) { curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions))); } public Set<OsVersionTarget> readOsVersionTargets() { return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet); } public void writeOsVersionStatus(OsVersionStatus status) { curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status))); } public OsVersionStatus readOsVersionStatus() { return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty); } public void writeTenant(Tenant tenant) { curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant))); } public Optional<Tenant> readTenant(TenantName name) { return readSlime(tenantPath(name)).map(tenantSerializer::tenantFrom); } public List<Tenant> readTenants() { return readTenantNames().stream() .map(this::readTenant) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } public List<TenantName> readTenantNames() { return curator.getChildren(tenantRoot).stream() .map(TenantName::from) .collect(Collectors.toList()); } public void removeTenant(TenantName name) { curator.delete(tenantPath(name)); } public void writeApplication(Application application) { curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application))); } public Optional<Application> readApplication(TenantAndApplicationId application) { Path path = applicationPath(application); return 
// Cache refresh: reuse the cached value only while its recorded stat version matches the node's current version.
curator.getStat(path) .map(stat -> cachedApplications.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), read(path, bytes -> applicationSerializer.fromSlime(bytes)).get())).getSecond()); } public List<Application> readApplications(boolean canFail) { return readApplications(ignored -> true, canFail); } public List<Application> readApplications(TenantName name) { return readApplications(application -> application.tenant().equals(name), false); } private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) { var applicationIds = readApplicationIds(); var applications = new ArrayList<Application>(applicationIds.size()); for (var id : applicationIds) { if (!applicationFilter.test(id)) continue; try { readApplication(id).ifPresent(applications::add); } catch (Exception e) { if (canFail) { log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " + "manual intervention", e); } else { throw e; } } } return Collections.unmodifiableList(applications); } public List<TenantAndApplicationId> readApplicationIds() { return curator.getChildren(applicationRoot).stream() .map(TenantAndApplicationId::fromSerialized) .sorted() .collect(toUnmodifiableList()); } public void removeApplication(TenantAndApplicationId id) { curator.delete(applicationPath(id)); } public void writeLastRun(Run run) { curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run))); } public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) { Path path = runsPath(id, type); curator.set(path, asJson(runSerializer.toSlime(runs))); } public Optional<Run> readLastRun(ApplicationId id, JobType type) { return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime); } public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) { Path path = runsPath(id, type); return 
// Same stat-version-guarded cache pattern as readApplication, applied to historic runs.
curator.getStat(path) .map(stat -> cachedHistoricRuns.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond()) .orElseGet(Collections::emptyNavigableMap); } public void deleteRunData(ApplicationId id, JobType type) { curator.delete(runsPath(id, type)); curator.delete(lastRunPath(id, type)); } public void deleteRunData(ApplicationId id) { curator.delete(jobRoot.append(id.serializedForm())); } public List<ApplicationId> applicationsWithJobs() { return curator.getChildren(jobRoot).stream() .map(ApplicationId::fromSerializedForm) .collect(Collectors.toList()); } public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) { return curator.getData(logPath(id, type, chunkId)); } public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) { curator.set(logPath(id, type, chunkId), log); } public void deleteLog(ApplicationId id, JobType type) { curator.delete(runsPath(id, type).append("logs")); } public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) { return curator.getData(lastLogPath(id, type)) .map(String::new).map(Long::parseLong); } public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) { curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes()); } public LongStream getLogChunkIds(ApplicationId id, JobType type) { return curator.getChildren(runsPath(id, type).append("logs")).stream() .mapToLong(Long::parseLong) .sorted(); } public AuditLog readAuditLog() { return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime) .orElse(AuditLog.empty); } public void writeAuditLog(AuditLog log) { curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log))); } public NameServiceQueue readNameServiceQueue() { return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime) .orElse(NameServiceQueue.EMPTY); } public void 
writeNameServiceQueue(NameServiceQueue queue) { curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue))); } @SuppressWarnings("unused") public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } @SuppressWarnings("unused") public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } @SuppressWarnings("unused") public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) { for (var policy : policies) { if (!policy.id().owner().equals(application)) { throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " + application.toShortString()); } } curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies))); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() { return readRoutingPolicies((instance) -> true); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) { return curator.getChildren(routingPoliciesRoot).stream() .map(ApplicationId::fromSerializedForm) .filter(filter) .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies)); } public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) { return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime)) .orElseGet(List::of); } public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) { curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy))); } public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) { return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data)) .orElseGet(() -> new ZoneRoutingPolicy(zone, 
RoutingStatus.DEFAULT)); } public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) { curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata))); } public void deleteEndpointCertificateMetadata(ApplicationId applicationId) { curator.delete(endpointCertificatePath(applicationId)); } public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) { return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString); } public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() { Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>(); for (String appIdString : curator.getChildren(endpointCertificateRoot)) { ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString); Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId); allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow()); } return allEndpointCertificateMetadata; } public void writeMeteringRefreshTime(long timestamp) { curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes()); } public long readMeteringRefreshTime() { return curator.getData(meteringRefreshPath()) .map(String::new).map(Long::parseLong) .orElse(0L); } public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) { return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString) .orElseGet(Set::of); } public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) { curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets))); } public Optional<VespaChangeRequest> readChangeRequest(String 
changeRequestId) { return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime); } public List<VespaChangeRequest> readChangeRequests() { return curator.getChildren(changeRequestsRoot) .stream() .map(this::readChangeRequest) .flatMap(Optional::stream) .collect(Collectors.toList()); } public void writeChangeRequest(VespaChangeRequest changeRequest) { curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest))); } public void deleteChangeRequest(VespaChangeRequest changeRequest) { curator.delete(changeRequestPath(changeRequest.getId())); } public List<Notification> readNotifications(TenantName tenantName) { return readSlime(notificationsPath(tenantName)) .map(slime -> notificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of); } public List<TenantName> listTenantsWithNotifications() { return curator.getChildren(notificationsRoot).stream() .map(TenantName::from) .collect(Collectors.toUnmodifiableList()); } public void writeNotifications(TenantName tenantName, List<Notification> notifications) { curator.set(notificationsPath(tenantName), asJson(notificationsSerializer.toSlime(notifications))); } public void deleteNotifications(TenantName tenantName) { curator.delete(notificationsPath(tenantName)); } public SupportAccess readSupportAccess(DeploymentId deploymentId) { return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY); } /** Take lock before reading before writing */ public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) { curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess))); } public List<RetriggerEntry> readRetriggerEntries() { return readSlime(deploymentRetriggerPath()).map(retriggerEntrySerializer::fromSlime).orElseGet(List::of); } public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) { 
// The whole retrigger queue is persisted as a single node.
curator.set(deploymentRetriggerPath(), asJson(retriggerEntrySerializer.toSlime(retriggerEntries))); } private Path lockPath(TenantName tenant) { return lockRoot .append(tenant.value()); } private Path lockPath(TenantAndApplicationId application) { return lockRoot.append(application.tenant().value() + ":" + application.application().value()); } private Path lockPath(ApplicationId instance, ZoneId zone) { return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value()); } private Path lockPath(ApplicationId instance, JobType type) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName()); } private Path lockPath(ApplicationId instance, JobType type, Step step) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name()); } private Path lockPath(String provisionId) { return lockRoot .append(provisionStatePath()) .append(provisionId); } private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private static Path targetMajorVersionPath() { return root.append("upgrader").append("targetMajorVersion"); } private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); } private static Path osVersionTargetsPath() { return root.append("osUpgrader").append("targetVersion"); } private static Path osVersionStatusPath() { return root.append("osVersionStatus"); } private static Path versionStatusPath() { return root.append("versionStatus"); } private static Path routingPolicyPath(ApplicationId application) { return routingPoliciesRoot.append(application.serializedForm()); } private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); } private static Path nameServiceQueuePath() { return root.append("nameServiceQueue"); } private static Path auditLogPath() { return root.append("auditLog"); } private static Path provisionStatePath() { 
return root.append("provisioning").append("states"); } private static Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); } private static Path applicationPath(TenantAndApplicationId id) { return applicationRoot.append(id.serialized()); } private static Path runsPath(ApplicationId id, JobType type) { return jobRoot.append(id.serializedForm()).append(type.jobName()); } private static Path lastRunPath(ApplicationId id, JobType type) { return runsPath(id, type).append("last"); } private static Path logPath(ApplicationId id, JobType type, long first) { return runsPath(id, type).append("logs").append(Long.toString(first)); } private static Path lastLogPath(ApplicationId id, JobType type) { return runsPath(id, type).append("logs"); } private static Path controllerPath(String hostname) { return controllerRoot.append(hostname); } private static Path endpointCertificatePath(ApplicationId id) { return endpointCertificateRoot.append(id.serializedForm()); } private static Path meteringRefreshPath() { return root.append("meteringRefreshTime"); } private static Path archiveBucketsPath(ZoneId zoneId) { return archiveBucketsRoot.append(zoneId.value()); } private static Path changeRequestPath(String id) { return changeRequestsRoot.append(id); } private static Path notificationsPath(TenantName tenantName) { return notificationsRoot.append(tenantName.value()); } private static Path supportAccessPath(DeploymentId deploymentId) { return supportAccessRoot.append(deploymentId.dottedString()); } private static Path deploymentRetriggerPath() { return root.append("deploymentRetriggerQueue"); } }
class CuratorDb { private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Duration deployLockTimeout = Duration.ofMinutes(30); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1); private static final Path root = Path.fromString("/controller/v1"); private static final Path lockRoot = root.append("locks"); private static final Path tenantRoot = root.append("tenants"); private static final Path applicationRoot = root.append("applications"); private static final Path jobRoot = root.append("jobs"); private static final Path controllerRoot = root.append("controllers"); private static final Path routingPoliciesRoot = root.append("routingPolicies"); private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies"); private static final Path endpointCertificateRoot = root.append("applicationCertificates"); private static final Path archiveBucketsRoot = root.append("archiveBuckets"); private static final Path changeRequestsRoot = root.append("changeRequests"); private static final Path notificationsRoot = root.append("notifications"); private static final Path supportAccessRoot = root.append("supportAccess"); private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer(); private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer); private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer(); private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer(); private final TenantSerializer tenantSerializer = new TenantSerializer(); private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer(); private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer); private final 
OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer); private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer(); private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer); private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer(); private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer(); private final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private final RunSerializer runSerializer = new RunSerializer(); private final RetriggerEntrySerializer retriggerEntrySerializer = new RetriggerEntrySerializer(); private final NotificationsSerializer notificationsSerializer = new NotificationsSerializer(); private final Curator curator; private final Duration tryLockTimeout; private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>(); private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>(); @Inject public CuratorDb(Curator curator, ServiceRegistry services) { this(curator, defaultTryLockTimeout, services.zoneRegistry().system()); } CuratorDb(Curator curator, Duration tryLockTimeout, SystemName system) { this.curator = curator; this.tryLockTimeout = tryLockTimeout; } /** Returns all hostnames configured to be part of this ZooKeeper cluster */ public List<String> cluster() { return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(",")) .filter(hostAndPort -> !hostAndPort.isEmpty()) .map(hostAndPort -> hostAndPort.split(":")[0]) .collect(Collectors.toUnmodifiableList()); } public Mutex lock(TenantName name) { return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2)); } public Mutex lock(TenantAndApplicationId id) { return curator.lock(lockPath(id), 
defaultLockTimeout.multipliedBy(2)); } public Mutex lockForDeployment(ApplicationId id, ZoneId zone) { return curator.lock(lockPath(id, zone), deployLockTimeout); } public Mutex lock(ApplicationId id, JobType type) { return curator.lock(lockPath(id, type), defaultLockTimeout); } public Mutex lock(ApplicationId id, JobType type, Step step) throws TimeoutException { return tryLock(lockPath(id, type, step)); } public Mutex lockRotations() { return curator.lock(lockRoot.append("rotations"), defaultLockTimeout); } public Mutex lockConfidenceOverrides() { return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout); } public Mutex lockMaintenanceJob(String jobName) { try { return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName)); } catch (TimeoutException e) { throw new UncheckedTimeoutException(e); } } public Mutex lockProvisionState(String provisionStateId) { return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Mutex lockOsVersions() { return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout); } public Mutex lockOsVersionStatus() { return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout); } public Mutex lockRoutingPolicies() { return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout); } public Mutex lockAuditLog() { return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout); } public Mutex lockNameServiceQueue() { return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout); } public Mutex lockMeteringRefreshTime() throws TimeoutException { return tryLock(lockRoot.append("meteringRefreshTime")); } public Mutex lockArchiveBuckets(ZoneId zoneId) { return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout); } public Mutex lockChangeRequests() { return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout); } public Mutex lockNotifications(TenantName tenantName) { return 
curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout); } public Mutex lockSupportAccess(DeploymentId deploymentId) { return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout); } public Mutex lockDeploymentRetriggerQueue() { return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout); } /** Try locking with a low timeout, meaning it is OK to fail lock acquisition. * * Useful for maintenance jobs, where there is no point in running the jobs back to back. */ private Mutex tryLock(Path path) throws TimeoutException { try { return curator.lock(path, tryLockTimeout); } catch (UncheckedTimeoutException e) { throw new TimeoutException(e.getMessage()); } } private <T> Optional<T> read(Path path, Function<byte[], T> mapper) { return curator.getData(path).filter(data -> data.length > 0).map(mapper); } private Optional<Slime> readSlime(Path path) { return read(path, SlimeUtils::jsonToSlime); } private static byte[] asJson(Slime slime) { try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } public double readUpgradesPerMinute() { return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125); } public void writeUpgradesPerMinute(double n) { curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); } public VersionStatus readVersionStatus() { return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty); } public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) { curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides))); } public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() { return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime) .orElseGet(Collections::emptyMap); } public 
void writeControllerVersion(HostName hostname, ControllerVersion version) { curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version))); } public ControllerVersion readControllerVersion(HostName hostname) { return readSlime(controllerPath(hostname.value())) .map(controllerVersionSerializer::fromSlime) .orElse(ControllerVersion.CURRENT); } public void writeOsVersionTargets(Set<OsVersionTarget> versions) { curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions))); } public Set<OsVersionTarget> readOsVersionTargets() { return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet); } public void writeOsVersionStatus(OsVersionStatus status) { curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status))); } public OsVersionStatus readOsVersionStatus() { return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty); } public void writeTenant(Tenant tenant) { curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant))); } public Optional<Tenant> readTenant(TenantName name) { return readSlime(tenantPath(name)).map(tenantSerializer::tenantFrom); } public List<Tenant> readTenants() { return readTenantNames().stream() .map(this::readTenant) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } public List<TenantName> readTenantNames() { return curator.getChildren(tenantRoot).stream() .map(TenantName::from) .collect(Collectors.toList()); } public void removeTenant(TenantName name) { curator.delete(tenantPath(name)); } public void writeApplication(Application application) { curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application))); } public Optional<Application> readApplication(TenantAndApplicationId application) { Path path = applicationPath(application); return 
curator.getStat(path) .map(stat -> cachedApplications.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), read(path, bytes -> applicationSerializer.fromSlime(bytes)).get())).getSecond()); } public List<Application> readApplications(boolean canFail) { return readApplications(ignored -> true, canFail); } public List<Application> readApplications(TenantName name) { return readApplications(application -> application.tenant().equals(name), false); } private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) { var applicationIds = readApplicationIds(); var applications = new ArrayList<Application>(applicationIds.size()); for (var id : applicationIds) { if (!applicationFilter.test(id)) continue; try { readApplication(id).ifPresent(applications::add); } catch (Exception e) { if (canFail) { log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " + "manual intervention", e); } else { throw e; } } } return Collections.unmodifiableList(applications); } public List<TenantAndApplicationId> readApplicationIds() { return curator.getChildren(applicationRoot).stream() .map(TenantAndApplicationId::fromSerialized) .sorted() .collect(toUnmodifiableList()); } public void removeApplication(TenantAndApplicationId id) { curator.delete(applicationPath(id)); } public void writeLastRun(Run run) { curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run))); } public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) { Path path = runsPath(id, type); curator.set(path, asJson(runSerializer.toSlime(runs))); } public Optional<Run> readLastRun(ApplicationId id, JobType type) { return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime); } public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) { Path path = runsPath(id, type); return 
curator.getStat(path) .map(stat -> cachedHistoricRuns.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond()) .orElseGet(Collections::emptyNavigableMap); } public void deleteRunData(ApplicationId id, JobType type) { curator.delete(runsPath(id, type)); curator.delete(lastRunPath(id, type)); } public void deleteRunData(ApplicationId id) { curator.delete(jobRoot.append(id.serializedForm())); } public List<ApplicationId> applicationsWithJobs() { return curator.getChildren(jobRoot).stream() .map(ApplicationId::fromSerializedForm) .collect(Collectors.toList()); } public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) { return curator.getData(logPath(id, type, chunkId)); } public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) { curator.set(logPath(id, type, chunkId), log); } public void deleteLog(ApplicationId id, JobType type) { curator.delete(runsPath(id, type).append("logs")); } public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) { return curator.getData(lastLogPath(id, type)) .map(String::new).map(Long::parseLong); } public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) { curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes()); } public LongStream getLogChunkIds(ApplicationId id, JobType type) { return curator.getChildren(runsPath(id, type).append("logs")).stream() .mapToLong(Long::parseLong) .sorted(); } public AuditLog readAuditLog() { return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime) .orElse(AuditLog.empty); } public void writeAuditLog(AuditLog log) { curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log))); } public NameServiceQueue readNameServiceQueue() { return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime) .orElse(NameServiceQueue.EMPTY); } public void 
writeNameServiceQueue(NameServiceQueue queue) { curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue))); } @SuppressWarnings("unused") public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } @SuppressWarnings("unused") public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } @SuppressWarnings("unused") public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) { for (var policy : policies) { if (!policy.id().owner().equals(application)) { throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " + application.toShortString()); } } curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies))); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() { return readRoutingPolicies((instance) -> true); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) { return curator.getChildren(routingPoliciesRoot).stream() .map(ApplicationId::fromSerializedForm) .filter(filter) .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies)); } public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) { return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime)) .orElseGet(List::of); } public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) { curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy))); } public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) { return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data)) .orElseGet(() -> new ZoneRoutingPolicy(zone, 
RoutingStatus.DEFAULT)); } public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) { curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata))); } public void deleteEndpointCertificateMetadata(ApplicationId applicationId) { curator.delete(endpointCertificatePath(applicationId)); } public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) { return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString); } public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() { Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>(); for (String appIdString : curator.getChildren(endpointCertificateRoot)) { ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString); Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId); allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow()); } return allEndpointCertificateMetadata; } public void writeMeteringRefreshTime(long timestamp) { curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes()); } public long readMeteringRefreshTime() { return curator.getData(meteringRefreshPath()) .map(String::new).map(Long::parseLong) .orElse(0L); } public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) { return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString) .orElseGet(Set::of); } public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) { curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets))); } public Optional<VespaChangeRequest> readChangeRequest(String 
changeRequestId) { return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime); } public List<VespaChangeRequest> readChangeRequests() { return curator.getChildren(changeRequestsRoot) .stream() .map(this::readChangeRequest) .flatMap(Optional::stream) .collect(Collectors.toList()); } public void writeChangeRequest(VespaChangeRequest changeRequest) { curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest))); } public void deleteChangeRequest(VespaChangeRequest changeRequest) { curator.delete(changeRequestPath(changeRequest.getId())); } public List<Notification> readNotifications(TenantName tenantName) { return readSlime(notificationsPath(tenantName)) .map(slime -> notificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of); } public List<TenantName> listTenantsWithNotifications() { return curator.getChildren(notificationsRoot).stream() .map(TenantName::from) .collect(Collectors.toUnmodifiableList()); } public void writeNotifications(TenantName tenantName, List<Notification> notifications) { curator.set(notificationsPath(tenantName), asJson(notificationsSerializer.toSlime(notifications))); } public void deleteNotifications(TenantName tenantName) { curator.delete(notificationsPath(tenantName)); } public SupportAccess readSupportAccess(DeploymentId deploymentId) { return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY); } /** Take lock before reading before writing */ public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) { curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess))); } public List<RetriggerEntry> readRetriggerEntries() { return readSlime(deploymentRetriggerPath()).map(retriggerEntrySerializer::fromSlime).orElseGet(List::of); } public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) { 
curator.set(deploymentRetriggerPath(), asJson(retriggerEntrySerializer.toSlime(retriggerEntries))); } private Path lockPath(TenantName tenant) { return lockRoot .append(tenant.value()); } private Path lockPath(TenantAndApplicationId application) { return lockRoot.append(application.tenant().value() + ":" + application.application().value()); } private Path lockPath(ApplicationId instance, ZoneId zone) { return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value()); } private Path lockPath(ApplicationId instance, JobType type) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName()); } private Path lockPath(ApplicationId instance, JobType type, Step step) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name()); } private Path lockPath(String provisionId) { return lockRoot .append(provisionStatePath()) .append(provisionId); } private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); } private static Path osVersionTargetsPath() { return root.append("osUpgrader").append("targetVersion"); } private static Path osVersionStatusPath() { return root.append("osVersionStatus"); } private static Path versionStatusPath() { return root.append("versionStatus"); } private static Path routingPolicyPath(ApplicationId application) { return routingPoliciesRoot.append(application.serializedForm()); } private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); } private static Path nameServiceQueuePath() { return root.append("nameServiceQueue"); } private static Path auditLogPath() { return root.append("auditLog"); } private static Path provisionStatePath() { return root.append("provisioning").append("states"); } private static Path provisionStatePath(String provisionId) { 
return provisionStatePath().append(provisionId); } private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); } private static Path applicationPath(TenantAndApplicationId id) { return applicationRoot.append(id.serialized()); } private static Path runsPath(ApplicationId id, JobType type) { return jobRoot.append(id.serializedForm()).append(type.jobName()); } private static Path lastRunPath(ApplicationId id, JobType type) { return runsPath(id, type).append("last"); } private static Path logPath(ApplicationId id, JobType type, long first) { return runsPath(id, type).append("logs").append(Long.toString(first)); } private static Path lastLogPath(ApplicationId id, JobType type) { return runsPath(id, type).append("logs"); } private static Path controllerPath(String hostname) { return controllerRoot.append(hostname); } private static Path endpointCertificatePath(ApplicationId id) { return endpointCertificateRoot.append(id.serializedForm()); } private static Path meteringRefreshPath() { return root.append("meteringRefreshTime"); } private static Path archiveBucketsPath(ZoneId zoneId) { return archiveBucketsRoot.append(zoneId.value()); } private static Path changeRequestPath(String id) { return changeRequestsRoot.append(id); } private static Path notificationsPath(TenantName tenantName) { return notificationsRoot.append(tenantName.value()); } private static Path supportAccessPath(DeploymentId deploymentId) { return supportAccessRoot.append(deploymentId.dottedString()); } private static Path deploymentRetriggerPath() { return root.append("deploymentRetriggerQueue"); } }
// Good point, I didn't notice this. I'll fix it in a PR soon.
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
controller.notificationsDb().setNotification(NotificationSource.from(id),
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
    */
    public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) {
        ApplicationController applications = controller.applications();
        // Holder for the generated version, so it can escape the locked closure below.
        AtomicReference<ApplicationVersion> version = new AtomicReference<>();
        applications.lockApplicationOrThrow(id, application -> {
            Optional<ApplicationVersion> previousVersion = application.get().revisions().last();
            Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong()))
                                                                          .map(ApplicationPackage::new);
            long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L);
            version.set(submission.toApplicationVersion(1 + previousBuild));

            // Store the package, its diff against the previous package, the test package, and the meta data.
            byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage()))
                                         .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage()));
            applications.applicationStore().put(id.tenant(),
                                                id.application(),
                                                version.get().id(),
                                                submission.applicationPackage().zippedContent(),
                                                submission.testPackage(),
                                                diff);
            applications.applicationStore().putMeta(id.tenant(),
                                                    id.application(),
                                                    controller.clock().instant(),
                                                    submission.applicationPackage().metaDataZip());

            // A project id of -1 clears the external build-system association.
            application = application.withProjectId(projectId == -1 ? OptionalLong.empty() : OptionalLong.of(projectId));
            application = application.withRevisions(revisions -> revisions.with(version.get()));
            application = withPrunedPackages(application, version.get().id());

            validate(id, submission);

            applications.storeWithUpdatedConfig(application, submission.applicationPackage());
            if (application.get().projectId().isPresent())
                applications.deploymentTrigger().triggerNewRevision(id);
        });
        return version.get();
    }

    /** Runs all submission validations, updating stored notifications with the outcome. */
    private void validate(TenantAndApplicationId id, Submission submission) {
        validateTests(id, submission);
        validateParentVersion(id, submission);
        validateMajorVersion(id, submission);
    }

    /** Sets or clears a test-package notification, depending on whether the tests validate. */
    private void validateTests(TenantAndApplicationId id, Submission submission) {
        TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage());
        if (testSummary.problems().isEmpty())
            controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage);
        else
            controller.notificationsDb().setNotification(NotificationSource.from(id),
                                                         Type.testPackage,
                                                         Notification.Level.warning,
                                                         testSummary.problems());
    }

    /** Warns when the version used to compile the package is on a lower major than the current system version. */
    private void validateParentVersion(TenantAndApplicationId id, Submission submission) {
        submission.applicationPackage().parentVersion().ifPresent(parent -> {
            if (parent.getMajor() < controller.readSystemVersion().getMajor())
                controller.notificationsDb().setNotification(NotificationSource.from(id),
                                                             Type.submission,
                                                             Notification.Level.warning,
                                                             "Parent version used to compile the application is on a " +
                                                             "lower major version than the current Vespa Cloud version");
            else
                controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission);
        });
    }

    /** Prunes stored packages for all revisions older than the oldest deployed (or pending) revision. */
    private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){
        TenantAndApplicationId id = application.get().id();
        Application wrapped = application.get();
        // Keep packages back to the oldest deployed revision, or the oldest revision of any pending
        // change; fall back to the latest (just submitted) revision when neither exists.
        RevisionId oldestDeployed = application.get().oldestDeployedRevision()
                                               .or(() -> wrapped.instances().values().stream()
                                                                .flatMap(instance -> instance.change().revision().stream())
                                                                .min(naturalOrder()))
                                               .orElse(latest);
        controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed);

        for (ApplicationVersion version : application.get().revisions().withPackage())
            if (version.id().compareTo(oldestDeployed) < 0)
                application = application.withRevisions(revisions -> revisions.with(version.withoutPackage()));
        return application;
    }

    /** Forget revisions no longer present in any relevant job history. */
    private void pruneRevisions(Run run) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application());
        boolean isProduction = run.versions().targetRevision().isProduction();
        // Production revisions are compared across all jobs of the application;
        // development revisions only within the run's own job.
        (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream()
                      : Stream.of(jobStatus(run.id().job())))
                .flatMap(jobs -> jobs.runs().values().stream())
                .map(r -> r.versions().targetRevision())
                .filter(id -> id.isProduction() == isProduction)
                .min(naturalOrder())
                .ifPresent(oldestRevision -> {
                    controller.applications().lockApplicationOrThrow(applicationId, application -> {
                        if (isProduction) {
                            controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number());
                            controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision)));
                        }
                        else {
                            controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number());
                            controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job())));
                        }
                    });
                });
    }

    /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running.
    */
    public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) {
        start(id, type, versions, isRedeployment, JobProfile.of(type), reason);
    }

    /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
    public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) {
        ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision());
        // Refuse to start a run whose target platform is incompatible with the revision's compile version.
        if (revision.compileVersion()
                    .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version))
                    .orElse(false))
            throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " +
                                               "and compile versions (" + revision.compileVersion().get() + ")");

        locked(id, type, __ -> {
            Optional<Run> last = last(id, type);
            if (last.flatMap(run -> active(run.id())).isPresent())
                throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!");

            // Run numbers are consecutive, starting at 1.
            RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
            curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason));
            metric.jobStarted(newId.job());
        });
    }

    /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
    public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
        deploy(id, type, platform, applicationPackage, false);
    }

    /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/
    public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) {
        if ( ! controller.zoneRegistry().hasZone(type.zone()))
            throw new IllegalArgumentException(type.zone() + " is not present in this system");

        // Create the instance on the fly if it is not already present.
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            if ( ! application.get().instances().containsKey(id.instance()))
                application = controller.applications().withNewInstance(application, id);
            controller.applications().store(application);
        });

        DeploymentId deploymentId = new DeploymentId(id, type.zone());
        Optional<Run> lastRun = last(id, type);
        // Abort any ongoing run of this job, and wait for it to end, before starting a new one.
        lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2)));

        // Development revisions get their own, per-job build numbers.
        long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L);
        RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type));
        ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion());

        byte[] diff = getDiff(applicationPackage, deploymentId, lastRun);

        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff);
            Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance())));
            controller.applications().store(application.withRevisions(revisions -> revisions.with(version)));
            start(id,
                  type,
                  new Versions(targetPlatform,
                               version.id(),
                               lastRun.map(run -> run.versions().targetPlatform()),
                               lastRun.map(run -> run.versions().targetRevision())),
                  false,
                  dryRun ? JobProfile.developmentDryRun : JobProfile.development,
                  Optional.empty());
        });

        // Kick off the runner for the newly created run.
        locked(id, type, __ -> {
            runner.get().accept(last(id, type).get());
        });
    }

    /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */
    private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) {
        return lastRun.map(run -> run.versions().targetRevision())
                      .map(prevVersion -> {
                          ApplicationPackage previous;
                          try {
                              previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion));
                          } catch (IllegalArgumentException e) {
                              // The previous package may be gone or unreadable; diff against nothing instead.
                              return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage);
                          }
                          return ApplicationPackageDiff.diff(previous, applicationPackage);
                      })
                      .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage));
    }

    // Picks a deployable platform which is compatible with the package's compile version,
    // and with any major version pinned in deployment.xml. (Continues in the next statements.)
    private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) {
        // Candidates: all deployable versions, plus the version already deployed in this zone, if any.
        List<Version> versions = controller.readVersionStatus().deployableVersions().stream()
                                           .map(VespaVersion::versionNumber)
                                           .collect(toList());
        instance.map(Instance::deployments)
                .map(deployments -> deployments.get(id.zoneId()))
                .map(Deployment::version)
                .ifPresent(versions::add);
        if (versions.isEmpty())
            throw new IllegalStateException("no deployable platform version found in the system");

        VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId());
        List<Version> compatibleVersions = new ArrayList<>();
        for (Version target : reversed(versions))
            if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get()))
                compatibleVersions.add(target);
        if (compatibleVersions.isEmpty())
            throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get());

        Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion();
        // (continuation of findTargetPlatform) Restrict to the major version pinned in deployment.xml, if any.
        List<Version> versionOnRightMajor = new ArrayList<>();
        for (Version target : reversed(versions))
            if (major.isEmpty() || major.get() == target.getMajor())
                versionOnRightMajor.add(target);
        if (versionOnRightMajor.isEmpty())
            throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml");

        // Return the first version satisfying both the compatibility and the major constraint.
        for (Version target : compatibleVersions)
            if (versionOnRightMajor.contains(target))
                return target;
        throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " +
                                           "are compatible with compile version " + applicationPackage.compileVersion().get());
    }

    /** Aborts a run and waits for it complete. */
    private void abortAndWait(RunId id, Duration timeout) {
        abort(id, "replaced by new deployment");
        runner.get().accept(last(id.application(), id.type()).get());

        // Poll until the run has ended, or the deadline passes.
        Instant doom = controller.clock().instant().plus(timeout);
        Duration sleep = Duration.ofMillis(100);
        while ( ! last(id.application(), id.type()).get().hasEnded()) {
            if (controller.clock().instant().plus(sleep).isAfter(doom))
                throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish");

            try {
                Thread.sleep(sleep.toMillis());
            }
            catch (InterruptedException e) {
                // Restore the interrupt flag before propagating.
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        }
    }

    /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
    public void collectGarbage() {
        Set<ApplicationId> applicationsToBuild = new HashSet<>(instances());
        curator.applicationsWithJobs().stream()
               .filter(id -> ! applicationsToBuild.contains(id))
               .forEach(id -> {
                   try {
                       TesterId tester = TesterId.of(id);
                       for (JobType type : jobs(id))
                           locked(id, type, deactivateTester, __ -> {
                               try (Mutex ___ = curator.lock(id, type)) {
                                   // Deactivation is best-effort: the tester deployment may already be gone.
                                   try { deactivateTester(tester, type); } catch (Exception e) { }
                                   curator.deleteRunData(id, type);
                               }
                           });
                       logs.delete(id);
                       curator.deleteRunData(id);
                   }
                   catch (Exception e) {
                       log.log(WARNING, "failed cleaning up after deleted application", e);
                   }
               });
    }

    /** Deactivates the tester deployment of the given tester id, in the given job type's zone. */
    public void deactivateTester(TesterId id, JobType type) {
        controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone()));
    }

    /** Locks all runs and modifies the list of historic runs for the given application and job type. */
    private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
        try (Mutex __ = curator.lock(id, type)) {
            SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type));
            modifications.accept(runs);
            curator.writeHistoricRuns(id, type, runs.values());
        }
    }

    /** Locks and modifies the run with the given id, provided it is still active. */
    public void locked(RunId id, UnaryOperator<Run> modifications) {
        try (Mutex __ = curator.lock(id.application(), id.type())) {
            active(id).ifPresent(run -> {
                Run modified = modifications.apply(run);
                if (modified != null)
                    curator.writeLastRun(modified);
            });
        }
    }

    /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
    public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
        try (Mutex lock = curator.lock(id, type, step)) {
            // Briefly take and release each prerequisite's lock, to verify none of them are still running.
            for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet()))
                try (Mutex __ = curator.lock(id, type, prerequisite)) {
                    ;
                }

            action.accept(new LockedStep(lock, step));
        }
    }

}
// Coordinates the execution and book-keeping of deployment jobs: run state is kept in
// curator (ZooKeeper), logs in a buffered log store, and run history is pruned over time.
class JobController {

    public static final Duration maxHistoryAge = Duration.ofDays(60);

    private static final Logger log = Logger.getLogger(JobController.class.getName());

    // Maximum number of historic runs to keep per job.
    private final int historyLength;
    private final Controller controller;
    private final CuratorDb curator;
    private final BufferedLogStore logs;
    private final TesterCloud cloud;
    private final JobMetrics metric;

    // The runner which advances active runs; a no-op until replaced via setRunner.
    private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });

    public JobController(Controller controller) {
        // CD systems keep a longer history than other systems.
        this.historyLength = controller.system().isCd() ? 256 : 64;
        this.controller = controller;
        this.curator = controller.curator();
        this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
        this.cloud = controller.serviceRegistry().testerCloud();
        this.metric = new JobMetrics(controller.metric(), controller::system);
    }

    public TesterCloud cloud() { return cloud; }

    public int historyLength() { return historyLength; }

    public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }

    /** Rewrite all job data with the newest format. */
    public void updateStorage() {
        for (ApplicationId id : instances())
            for (JobType type : jobs(id)) {
                // Read-and-rewrite the last run under the job lock; the historic runs map is untouched.
                locked(id, type, runs -> {
                    curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
                });
            }
    }

    /** Returns all entries currently logged for the given run. */
    public Optional<RunLog> details(RunId id) {
        return details(id, -1);
    }

    /** Returns the logged entries for the given run, which are after the given id threshold. */
    public Optional<RunLog> details(RunId id, long after) {
        try (Mutex __ = curator.lock(id.application(), id.type())) {
            Run run = runs(id.application(), id.type()).get(id);
            if (run == null)
                return Optional.empty();

            // Active runs are read from the buffered store; finished runs from the archive.
            return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after))
                                          : logs.readFinished(id, after);
        }
    }

    /** Stores the given log entries for the given run and step.
    */
    public void log(RunId id, Step step, List<LogEntry> entries) {
        locked(id, __ -> {
            logs.append(id.application(), id.type(), step, entries, true);
            return __;
        });
    }

    /** Stores the given log messages for the given run and step. */
    public void log(RunId id, Step step, Level level, List<String> messages) {
        log(id, step, messages.stream()
                              .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
                              .collect(toList()));
    }

    /** Stores the given log message for the given run and step. */
    public void log(RunId id, Step step, Level level, String message) {
        log(id, step, level, Collections.singletonList(message));
    }

    /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
    public void updateVespaLog(RunId id) {
        locked(id, run -> {
            if ( ! run.hasStep(copyVespaLogs))
                return run;

            storeVespaLogs(id);

            ZoneId zone = id.type().zone();
            Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application())
                                                                            .deployments().get(zone));
            // Nothing to fetch unless this run actually deployed something.
            if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
                return run;

            List<LogEntry> log;
            Instant deployedAt;
            Instant from;
            if ( ! run.id().type().isProduction()) {
                deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow();
                // Continue from the last fetched timestamp, or start slightly before the deployment.
                from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
                log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
                                                       .getLogs(new DeploymentId(id.application(), zone),
                                                                Map.of("from", Long.toString(from.toEpochMilli()))),
                                             from);
            }
            else log = List.of();

            if (id.type().isTest()) {
                deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow();
                from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
                List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
                                                                            .getLogs(new DeploymentId(id.tester().id(), zone),
                                                                                     Map.of("from", Long.toString(from.toEpochMilli()))),
                                                                  from);

                // Exclude the very newest entries, to avoid interleaving with entries fetched on the next pass.
                Instant justNow = controller.clock().instant().minusSeconds(2);
                log = Stream.concat(log.stream(), testerLog.stream())
                            .filter(entry -> entry.at().isBefore(justNow))
                            .sorted(comparing(LogEntry::at))
                            .collect(toUnmodifiableList());
            }

            if (log.isEmpty())
                return run;

            logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false);
            // Remember the timestamp of the newest stored entry, for continuation.
            return run.with(log.get(log.size() - 1).at());
        });
    }

    // Returns stored logs when the copy-vespa-logs step succeeded; otherwise fetches directly from the log server.
    public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) {
        Run run = run(id);
        return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false)
                ? controller.serviceRegistry().runDataStore().getLogs(id, tester)
                : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream());
    }

    // The start of the relevant install step, with a small buffer, if that step has started.
    public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) {
        return (tester ? run.stepInfo(installTester)
                       : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)))
                .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10));
    }

    // Copies logs from the log server to the run data store, for the real and tester deployments as applicable.
    public void storeVespaLogs(RunId id) {
        Run run = run(id);
        if ( ! id.type().isProduction()) {
            getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> {
                try (logs) {
                    controller.serviceRegistry().runDataStore().putLogs(id, false, logs);
                }
                catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
        }
        if (id.type().isTest()) {
            getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> {
                try (logs) {
                    controller.serviceRegistry().runDataStore().putLogs(id, true, logs);
                }
                catch(IOException e){
                    throw new UncheckedIOException(e);
                }
            });
        }
    }

    // Fetches logs from the log server, bounded by the deployment completion time and the run's end.
    private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) {
        return deploymentCompletedAt(run, tester).map(at ->
                controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(),
                                                                                     run.id().type().zone()),
                                                                    Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())),
                                                                           "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli()))));
    }

    /** Fetches any new test log entries, and records the id of the last of these, for continuation. */
    public void updateTestLog(RunId id) {
        locked(id, run -> {
            // Test logs are only relevant while the staging-setup or tests step is ready to run.
            Optional<Step> step = Stream.of(endStagingSetup, endTests)
                                        .filter(run.readySteps()::contains)
                                        .findAny();
            if (step.isEmpty())
                return run;

            List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()),
                                                  run.lastTestLogEntry());
            if (entries.isEmpty())
                return run;

            logs.append(id.application(), id.type(), step.get(), entries, false);
            // Remember the highest entry id seen, for continuation.
            return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
        });
    }

    // Stores the test report from the tester for this run, if one exists.
    public void updateTestReport(RunId id) {
        locked(id, run -> {
            Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone()));
            if (report.isEmpty()) {
                return run;
            }
            logs.writeTestReport(id, report.get());
            return run;
        });
    }

    public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); }

    /** Stores the given certificate as the tester certificate for this run, or throws if it's already set.
    */
    public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
        locked(id, run -> run.with(testerCertificate));
    }

    /** Returns a list of all instances of applications which have registered. */
    public List<ApplicationId> instances() {
        return controller.applications().readable().stream()
                         .flatMap(application -> application.instances().values().stream())
                         .map(Instance::id)
                         .collect(toUnmodifiableList());
    }

    /** Returns all job types which have been run for the given application. */
    private List<JobType> jobs(ApplicationId id) {
        return JobType.allIn(controller.zoneRegistry()).stream()
                      .filter(type -> last(id, type).isPresent())
                      .collect(toUnmodifiableList());
    }

    /** Returns an immutable map of all known runs for the given application and job type. */
    public NavigableMap<RunId, Run> runs(JobId id) {
        return runs(id.application(), id.type());
    }

    /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */
    public List<Instant> jobStarts(JobId id) {
        return runs(id).descendingMap().values().stream()
                       .filter(run -> ! run.isRedeployment())
                       .map(Run::start)
                       .collect(toUnmodifiableList());
    }

    /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */
    public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) {
        return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream()
                .findFirst()
                .orElseGet(deployment::at);
    }

    /** Returns an immutable map of all known runs for the given application and job type. */
    public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) {
        ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number));
        Optional<Run> last = last(id, type);
        // The historic runs may contain a stale copy of the last run; prefer the authoritative last run.
        curator.readHistoricRuns(id, type).forEach((runId, run) -> {
            if (last.isEmpty() || ! runId.equals(last.get().id()))
                runs.put(runId, run);
        });
        last.ifPresent(run -> runs.put(run.id(), run));
        return runs.build();
    }

    /** Returns the run with the given id, or throws if no such run exists. */
    public Run run(RunId id) {
        return runs(id.application(), id.type()).values().stream()
                                                .filter(run -> run.id().equals(id))
                                                .findAny()
                                                .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists"));
    }

    /** Returns the last run of the given type, for the given application, if one has been run. */
    public Optional<Run> last(JobId job) {
        return curator.readLastRun(job.application(), job.type());
    }

    /** Returns the last run of the given type, for the given application, if one has been run. */
    public Optional<Run> last(ApplicationId id, JobType type) {
        return curator.readLastRun(id, type);
    }

    /** Returns the last completed of the given job. */
    public Optional<Run> lastCompleted(JobId id) {
        return JobStatus.lastCompleted(runs(id));
    }

    /** Returns the first failing of the given job. */
    public Optional<Run> firstFailing(JobId id) {
        return JobStatus.firstFailing(runs(id));
    }

    /** Returns the last success of the given job. */
    public Optional<Run> lastSuccess(JobId id) {
        return JobStatus.lastSuccess(runs(id));
    }

    /** Returns the run with the given id, provided it is still active. */
    public Optional<Run> active(RunId id) {
        return last(id.application(), id.type())
                .filter(run -> ! run.hasEnded())
                .filter(run -> run.id().equals(id));
    }

    /** Returns a list of all active runs. */
    public List<Run> active() {
        return controller.applications().idList().stream()
                         .flatMap(id -> active(id).stream())
                         .toList();
    }

    /** Returns a list of all active runs for the given application. */
    public List<Run> active(TenantAndApplicationId id) {
        return controller.applications().requireApplication(id).instances().keySet().stream()
                         .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream()
                                                 .map(type -> last(id.instance(name), type))
                                                 .flatMap(Optional::stream)
                                                 .filter(run -> ! run.hasEnded()))
                         .toList();
    }

    /** Returns a list of all active runs for the given instance. */
    public List<Run> active(ApplicationId id) {
        return JobType.allIn(controller.zoneRegistry()).stream()
                      .map(type -> last(id, type))
                      .flatMap(Optional::stream)
                      .filter(run -> !run.hasEnded())
                      .toList();
    }

    /** Returns the job status of the given job, possibly empty. */
    public JobStatus jobStatus(JobId id) {
        return new JobStatus(id, runs(id));
    }

    /** Returns the deployment status of the given application. */
    public DeploymentStatus deploymentStatus(Application application) {
        VersionStatus versionStatus = controller.readVersionStatus();
        return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus));
    }

    private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) {
        return new DeploymentStatus(application,
                                    this::jobStatus,
                                    controller.zoneRegistry(),
                                    versionStatus,
                                    systemVersion,
                                    instance -> controller.applications().versionCompatibility(application.id().instance(instance)),
                                    controller.clock().instant());
    }

    /** Adds deployment status to each of the given applications. */
    public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) {
        // Resolve the system version once, and reuse it for all applications.
        Version systemVersion = controller.systemVersion(versionStatus);
        return DeploymentStatusList.from(applications.asList().stream()
                                                     .map(application -> deploymentStatus(application, versionStatus, systemVersion))
                                                     .toList());
    }

    /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */
    public DeploymentStatusList deploymentStatuses(ApplicationList applications) {
        VersionStatus versionStatus = controller.readVersionStatus();
        return deploymentStatuses(applications, versionStatus);
    }

    /** Changes the status of the given step, for the given run, provided it is still active.
    */
    public void update(RunId id, RunStatus status, LockedStep step) {
        locked(id, run -> run.with(status, step));
    }

    /**
     * Changes the status of the given run to inactive, and stores it as a historic run.
     * Throws TimeoutException if some step in this job is still being run.
     */
    public void finish(RunId id) throws TimeoutException {
        // Locks taken here are unwound, in order, in the finally block below.
        Deque<Mutex> locks = new ArrayDeque<>();
        try {
            // Lock the report step and all its prerequisite steps, so no step of this run
            // can still be executing while the run is archived.
            Run unlockedRun = run(id);
            locks.push(curator.lock(id.application(), id.type(), report));
            for (Step step : report.allPrerequisites(unlockedRun.steps().keySet()))
                locks.push(curator.lock(id.application(), id.type(), step));

            // NOTE(review): the log-message string literal opened at List.of(" below appears
            // truncated by text extraction — its content and closing quote are missing, which
            // garbles the remainder of this method's text; restore the original from version
            // control. The intact logic archives the finished run, prunes history beyond
            // historyLength or maxHistoryAge (keeping the only successful run within the age
            // limit), flushes logs, reports the job-finished metric, and prunes revisions.
            locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && !
            old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } }

    /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
    public void abort(RunId id, String reason) {
        locked(id, run -> {
            // Log the abort reason on every step that has not yet finished.
            run.stepStatuses().entrySet().stream()
               .filter(entry -> entry.getValue() == unfinished)
               .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason));
            return run.aborted();
        });
    }

    /** Accepts and stores a new application package and test jar pair under a generated application version key.
    */
    public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) {
        ApplicationController applications = controller.applications();
        // Holder for the generated version, so it can escape the locked closure below.
        AtomicReference<ApplicationVersion> version = new AtomicReference<>();
        applications.lockApplicationOrThrow(id, application -> {
            Optional<ApplicationVersion> previousVersion = application.get().revisions().last();
            Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong()))
                                                                          .map(ApplicationPackage::new);
            long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L);
            version.set(submission.toApplicationVersion(1 + previousBuild));

            // Store the package, its diff against the previous package, the test package, and the meta data.
            byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage()))
                                         .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage()));
            applications.applicationStore().put(id.tenant(),
                                                id.application(),
                                                version.get().id(),
                                                submission.applicationPackage().zippedContent(),
                                                submission.testPackage(),
                                                diff);
            applications.applicationStore().putMeta(id.tenant(),
                                                    id.application(),
                                                    controller.clock().instant(),
                                                    submission.applicationPackage().metaDataZip());

            // A project id of -1 clears the external build-system association.
            application = application.withProjectId(projectId == -1 ? OptionalLong.empty() : OptionalLong.of(projectId));
            application = application.withRevisions(revisions -> revisions.with(version.get()));
            application = withPrunedPackages(application, version.get().id());

            validate(id, submission);

            applications.storeWithUpdatedConfig(application, submission.applicationPackage());
            if (application.get().projectId().isPresent())
                applications.deploymentTrigger().triggerNewRevision(id);
        });
        return version.get();
    }

    /** Runs all submission validations, updating stored notifications with the outcome. */
    private void validate(TenantAndApplicationId id, Submission submission) {
        validateTests(id, submission);
        validateParentVersion(id, submission);
        validateMajorVersion(id, submission);
    }

    /** Sets or clears a test-package notification, depending on whether the tests validate. */
    private void validateTests(TenantAndApplicationId id, Submission submission) {
        TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage());
        if (testSummary.problems().isEmpty())
            controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage);
        else
            controller.notificationsDb().setNotification(NotificationSource.from(id),
                                                         Type.testPackage,
                                                         Notification.Level.warning,
                                                         testSummary.problems());
    }

    /** Warns when the version used to compile the package is on a lower major than the current system version. */
    private void validateParentVersion(TenantAndApplicationId id, Submission submission) {
        submission.applicationPackage().parentVersion().ifPresent(parent -> {
            if (parent.getMajor() < controller.readSystemVersion().getMajor())
                controller.notificationsDb().setNotification(NotificationSource.from(id),
                                                             Type.submission,
                                                             Notification.Level.warning,
                                                             "Parent version used to compile the application is on a " +
                                                             "lower major version than the current Vespa Cloud version");
            else
                controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission);
        });
    }

    /** Prunes stored packages for all revisions older than the oldest deployed (or pending) revision. */
    private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){
        TenantAndApplicationId id = application.get().id();
        Application wrapped = application.get();
        // Keep packages back to the oldest deployed revision, or the oldest revision of any pending
        // change; fall back to the latest (just submitted) revision when neither exists.
        RevisionId oldestDeployed = application.get().oldestDeployedRevision()
                                               .or(() -> wrapped.instances().values().stream()
                                                                .flatMap(instance -> instance.change().revision().stream())
                                                                .min(naturalOrder()))
                                               .orElse(latest);
        controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed);

        for (ApplicationVersion version : application.get().revisions().withPackage())
            if (version.id().compareTo(oldestDeployed) < 0)
                application = application.withRevisions(revisions -> revisions.with(version.withoutPackage()));
        return application;
    }

    /** Forget revisions no longer present in any relevant job history. */
    private void pruneRevisions(Run run) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application());
        boolean isProduction = run.versions().targetRevision().isProduction();
        // Production revisions are compared across all jobs of the application;
        // development revisions only within the run's own job.
        (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream()
                      : Stream.of(jobStatus(run.id().job())))
                .flatMap(jobs -> jobs.runs().values().stream())
                .map(r -> r.versions().targetRevision())
                .filter(id -> id.isProduction() == isProduction)
                .min(naturalOrder())
                .ifPresent(oldestRevision -> {
                    controller.applications().lockApplicationOrThrow(applicationId, application -> {
                        if (isProduction) {
                            controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number());
                            controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision)));
                        }
                        else {
                            controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number());
                            controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job())));
                        }
                    });
                });
    }

    /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running.
*/
public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) {
    start(id, type, versions, isRedeployment, JobProfile.of(type), reason);
}

/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) {
    ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision());
    // Refuse to combine a target platform with a compile version the compatibility rules reject.
    if (revision.compileVersion()
                .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version))
                .orElse(false))
        throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " +
                                           "and compile versions (" + revision.compileVersion().get() + ")");

    locked(id, type, __ -> {
        Optional<Run> last = last(id, type);
        if (last.flatMap(run -> active(run.id())).isPresent())
            throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!");

        RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
        curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason));
        metric.jobStarted(newId.job());
    });
}

/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
    deploy(id, type, platform, applicationPackage, false);
}

/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) {
    if ( ! controller.zoneRegistry().hasZone(type.zone()))
        throw new IllegalArgumentException(type.zone() + " is not present in this system");

    // Create the instance on the fly if this is its first deployment.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        if ( ! application.get().instances().containsKey(id.instance()))
            application = controller.applications().withNewInstance(application, id);
        controller.applications().store(application);
    });

    DeploymentId deploymentId = new DeploymentId(id, type.zone());
    Optional<Run> lastRun = last(id, type);
    lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2)));

    long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L);
    RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type));
    ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion());

    byte[] diff = getDiff(applicationPackage, deploymentId, lastRun);

    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff);
        Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance())));
        controller.applications().store(application.withRevisions(revisions -> revisions.with(version)));
        start(id,
              type,
              new Versions(targetPlatform,
                           version.id(),
                           lastRun.map(run -> run.versions().targetPlatform()),
                           lastRun.map(run -> run.versions().targetRevision())),
              false,
              dryRun ? JobProfile.developmentDryRun : JobProfile.development,
              Optional.empty());
    });

    locked(id, type, __ -> {
        runner.get().accept(last(id, type).get());
    });
}

/* Application package diff against previous version, or against empty version if previous does not exist or is invalid */
private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) {
    return lastRun.map(run -> run.versions().targetRevision())
                  .map(prevVersion -> {
                      ApplicationPackage previous;
                      try {
                          previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion));
                      }
                      catch (IllegalArgumentException e) {
                          // Previous package could not be parsed; fall back to a diff against nothing.
                          return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage);
                      }
                      return ApplicationPackageDiff.diff(previous, applicationPackage);
                  })
                  .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage));
}

/**
 * Picks the newest deployable platform which satisfies both the compile-version compatibility rules
 * and any major version pinned in deployment.xml.
 */
private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) {
    List<Version> versions = controller.readVersionStatus().deployableVersions().stream()
                                       .map(VespaVersion::versionNumber)
                                       .collect(toList());
    // The platform already deployed in this zone, if any, is also a candidate.
    instance.map(Instance::deployments)
            .map(deployments -> deployments.get(id.zoneId()))
            .map(Deployment::version)
            .ifPresent(versions::add);
    if (versions.isEmpty())
        throw new IllegalStateException("no deployable platform version found in the system");

    VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId());
    List<Version> compatibleVersions = new ArrayList<>();
    for (Version target : reversed(versions))
        if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get()))
            compatibleVersions.add(target);
    if (compatibleVersions.isEmpty())
        throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get());

    Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion();
    List<Version> versionOnRightMajor = new ArrayList<>();
    for (Version target : reversed(versions))
        if (major.isEmpty() || major.get() == target.getMajor())
            versionOnRightMajor.add(target);
    if (versionOnRightMajor.isEmpty())
        throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml");

    for (Version target : compatibleVersions)
        if (versionOnRightMajor.contains(target))
            return target;

    throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " +
                                       "are compatible with compile version " + applicationPackage.compileVersion().get());
}

/** Aborts a run and waits for it complete. */
private void abortAndWait(RunId id, Duration timeout) {
    abort(id, "replaced by new deployment");
    runner.get().accept(last(id.application(), id.type()).get());

    Instant doom = controller.clock().instant().plus(timeout);
    Duration sleep = Duration.ofMillis(100);
    while ( ! last(id.application(), id.type()).get().hasEnded()) {
        if (controller.clock().instant().plus(sleep).isAfter(doom))
            throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish");
        try {
            Thread.sleep(sleep.toMillis());
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
}

/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
    Set<ApplicationId> applicationsToBuild = new HashSet<>(instances());
    curator.applicationsWithJobs().stream()
           .filter(id -> ! applicationsToBuild.contains(id))
           .forEach(id -> {
               try {
                   TesterId tester = TesterId.of(id);
                   for (JobType type : jobs(id))
                       locked(id, type, deactivateTester, __ -> {
                           try (Mutex ___ = curator.lock(id, type)) {
                               try {
                                   deactivateTester(tester, type);
                               }
                               catch (Exception ignored) {
                                   // Best effort: the tester deployment may already be gone.
                               }
                               curator.deleteRunData(id, type);
                           }
                       });
                   logs.delete(id);
                   curator.deleteRunData(id);
               }
               catch (Exception e) {
                   log.log(WARNING, "failed cleaning up after deleted application", e);
               }
           });
}

public void deactivateTester(TesterId id, JobType type) {
    controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone()));
}

/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
    try (Mutex __ = curator.lock(id, type)) {
        SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type));
        modifications.accept(runs);
        curator.writeHistoricRuns(id, type, runs.values());
    }
}

/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
    try (Mutex __ = curator.lock(id.application(), id.type())) {
        active(id).ifPresent(run -> {
            Run modified = modifications.apply(run);
            if (modified != null) curator.writeLastRun(modified);
        });
    }
}

/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
    try (Mutex lock = curator.lock(id, type, step)) {
        // Taking and releasing each prerequisite's lock proves none of them are currently running.
        for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet()))
            try (Mutex __ = curator.lock(id, type, prerequisite)) {
                ;
            }
        action.accept(new LockedStep(lock, step));
    }
}

}
// See https://github.com/vespa-engine/vespa/pull/23930
public void writeVersionStatus(VersionStatus status) { curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status))); }
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
public void writeVersionStatus(VersionStatus status) { curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status))); }
/**
 * Controller persistence layer on top of ZooKeeper (via Curator): locks, plus typed
 * read/write accessors for every piece of controller state, each serialized as JSON (Slime).
 */
class CuratorDb {

    private static final Logger log = Logger.getLogger(CuratorDb.class.getName());

    // Deployments may legitimately hold their lock much longer than other operations.
    private static final Duration deployLockTimeout = Duration.ofMinutes(30);
    private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
    private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1);

    // All controller state lives under a single versioned root node.
    private static final Path root = Path.fromString("/controller/v1");
    private static final Path lockRoot = root.append("locks");
    private static final Path tenantRoot = root.append("tenants");
    private static final Path applicationRoot = root.append("applications");
    private static final Path jobRoot = root.append("jobs");
    private static final Path controllerRoot = root.append("controllers");
    private static final Path routingPoliciesRoot = root.append("routingPolicies");
    private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies");
    private static final Path endpointCertificateRoot = root.append("applicationCertificates");
    private static final Path archiveBucketsRoot = root.append("archiveBuckets");
    private static final Path changeRequestsRoot = root.append("changeRequests");
    private static final Path notificationsRoot = root.append("notifications");
    private static final Path supportAccessRoot = root.append("supportAccess");

    private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer();
    private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer);
    private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer();
    private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
    private final TenantSerializer tenantSerializer = new TenantSerializer();
    private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer();
    private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer);
    private final OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer);
    private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer();
    private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer);
    private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer();
    private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer();
    private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
    private final RunSerializer runSerializer = new RunSerializer();
    private final RetriggerEntrySerializer retriggerEntrySerializer = new RetriggerEntrySerializer();
    private final NotificationsSerializer notificationsSerializer = new NotificationsSerializer();

    private final Curator curator;
    private final Duration tryLockTimeout;

    // Stat-version-stamped caches: entries are recomputed only when the ZK node's version changes.
    private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>();
    private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>();

    @Inject
    public CuratorDb(Curator curator, ServiceRegistry services) {
        this(curator, defaultTryLockTimeout, services.zoneRegistry().system());
    }

    // NOTE(review): the SystemName argument is accepted but never stored — presumably kept for
    // constructor-signature compatibility; confirm before removing.
    CuratorDb(Curator curator, Duration tryLockTimeout, SystemName system) {
        this.curator = curator;
        this.tryLockTimeout = tryLockTimeout;
    }

    /** Returns all hostnames configured to be part of this ZooKeeper cluster */
    public List<String> cluster() {
        return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(","))
                     .filter(hostAndPort -> !hostAndPort.isEmpty())
                     .map(hostAndPort -> hostAndPort.split(":")[0])
                     .collect(Collectors.toUnmodifiableList());
    }

    // -------------- Locks ---------------------------------------------------

    public Mutex lock(TenantName name) {
        return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2));
    }

    public Mutex lock(TenantAndApplicationId id) {
        return curator.lock(lockPath(id), defaultLockTimeout.multipliedBy(2));
    }

    public Mutex lockForDeployment(ApplicationId id, ZoneId zone) {
        return curator.lock(lockPath(id, zone), deployLockTimeout);
    }

    public Mutex lock(ApplicationId id, JobType type) {
        return curator.lock(lockPath(id, type), defaultLockTimeout);
    }

    public Mutex lock(ApplicationId id, JobType type, Step step) throws TimeoutException {
        return tryLock(lockPath(id, type, step));
    }

    public Mutex lockRotations() {
        return curator.lock(lockRoot.append("rotations"), defaultLockTimeout);
    }

    public Mutex lockConfidenceOverrides() {
        return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout);
    }

    public Mutex lockMaintenanceJob(String jobName) {
        try {
            return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName));
        }
        catch (TimeoutException e) {
            throw new UncheckedTimeoutException(e);
        }
    }

    public Mutex lockProvisionState(String provisionStateId) {
        return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1));
    }

    public Mutex lockOsVersions() {
        return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout);
    }

    public Mutex lockOsVersionStatus() {
        return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout);
    }

    public Mutex lockRoutingPolicies() {
        return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout);
    }

    public Mutex lockAuditLog() {
        return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout);
    }

    public Mutex lockNameServiceQueue() {
        return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout);
    }

    public Mutex lockMeteringRefreshTime() throws TimeoutException {
        return tryLock(lockRoot.append("meteringRefreshTime"));
    }

    public Mutex lockArchiveBuckets(ZoneId zoneId) {
        return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout);
    }

    public Mutex lockChangeRequests() {
        return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout);
    }

    public Mutex lockNotifications(TenantName tenantName) {
        return curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout);
    }

    public Mutex lockSupportAccess(DeploymentId deploymentId) {
        return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout);
    }

    public Mutex lockDeploymentRetriggerQueue() {
        return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout);
    }

    /**
     * Try locking with a low timeout, meaning it is OK to fail lock acquisition.
     *
     * Useful for maintenance jobs, where there is no point in running the jobs back to back.
     */
    private Mutex tryLock(Path path) throws TimeoutException {
        try {
            return curator.lock(path, tryLockTimeout);
        }
        catch (UncheckedTimeoutException e) {
            throw new TimeoutException(e.getMessage());
        }
    }

    // -------------- Read/write helpers --------------------------------------

    /** Reads the data at the given path, mapping non-empty content with the given function. */
    private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
        return curator.getData(path).filter(data -> data.length > 0).map(mapper);
    }

    private Optional<Slime> readSlime(Path path) {
        return read(path, SlimeUtils::jsonToSlime);
    }

    private static byte[] asJson(Slime slime) {
        try {
            return SlimeUtils.toJsonBytes(slime);
        }
        catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    // -------------- Upgrader and version status -----------------------------

    public double readUpgradesPerMinute() {
        return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125);
    }

    public void writeUpgradesPerMinute(double n) {
        curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
    }

    public VersionStatus readVersionStatus() {
        return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
    }

    public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
        curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
    }

    public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
        return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
                                                   .orElseGet(Collections::emptyMap);
    }

    public void writeControllerVersion(HostName hostname, ControllerVersion version) {
        curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version)));
    }

    public ControllerVersion readControllerVersion(HostName hostname) {
        return readSlime(controllerPath(hostname.value()))
                .map(controllerVersionSerializer::fromSlime)
                .orElse(ControllerVersion.CURRENT);
    }

    public void writeOsVersionTargets(Set<OsVersionTarget> versions) {
        curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions)));
    }

    public Set<OsVersionTarget> readOsVersionTargets() {
        return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet);
    }

    public void writeOsVersionStatus(OsVersionStatus status) {
        curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status)));
    }

    public OsVersionStatus readOsVersionStatus() {
        return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty);
    }

    // -------------- Tenants -------------------------------------------------

    public void writeTenant(Tenant tenant) {
        curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
    }

    public Optional<Tenant> readTenant(TenantName name) {
        return readSlime(tenantPath(name)).map(tenantSerializer::tenantFrom);
    }

    public List<Tenant> readTenants() {
        return readTenantNames().stream()
                                .map(this::readTenant)
                                .flatMap(Optional::stream)
                                .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
    }

    public List<TenantName> readTenantNames() {
        return curator.getChildren(tenantRoot).stream()
                      .map(TenantName::from)
                      .collect(Collectors.toList());
    }

    public void removeTenant(TenantName name) {
        curator.delete(tenantPath(name));
    }

    // -------------- Applications --------------------------------------------

    public void writeApplication(Application application) {
        curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
    }

    /** Reads the given application, served from the cache when its ZK stat version is unchanged. */
    public Optional<Application> readApplication(TenantAndApplicationId application) {
        Path path = applicationPath(application);
        return curator.getStat(path)
                      .map(stat -> cachedApplications.compute(path, (__, old) ->
                              old != null && old.getFirst() == stat.getVersion()
                              ? old
                              : new Pair<>(stat.getVersion(), read(path, bytes -> applicationSerializer.fromSlime(bytes)).get())).getSecond());
    }

    public List<Application> readApplications(boolean canFail) {
        return readApplications(ignored -> true, canFail);
    }

    public List<Application> readApplications(TenantName name) {
        return readApplications(application -> application.tenant().equals(name), false);
    }

    /** Reads all applications matching the filter; with canFail, unreadable ones are logged and skipped. */
    private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) {
        var ids = readApplicationIds();
        var applications = new ArrayList<Application>(ids.size());
        for (var id : ids) {
            if (!applicationFilter.test(id)) continue;
            try {
                readApplication(id).ifPresent(applications::add);
            }
            catch (Exception e) {
                if (canFail) {
                    log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " +
                                          "manual intervention", e);
                }
                else {
                    throw e;
                }
            }
        }
        return Collections.unmodifiableList(applications);
    }

    public List<TenantAndApplicationId> readApplicationIds() {
        return curator.getChildren(applicationRoot).stream()
                      .map(TenantAndApplicationId::fromSerialized)
                      .sorted()
                      .collect(toUnmodifiableList());
    }

    public void removeApplication(TenantAndApplicationId id) {
        curator.delete(applicationPath(id));
    }

    // -------------- Job runs ------------------------------------------------

    public void writeLastRun(Run run) {
        curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run)));
    }

    public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) {
        Path path = runsPath(id, type);
        curator.set(path, asJson(runSerializer.toSlime(runs)));
    }

    public Optional<Run> readLastRun(ApplicationId id, JobType type) {
        return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime);
    }

    /** Reads historic runs for the job, served from the cache when the ZK stat version is unchanged. */
    public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) {
        Path path = runsPath(id, type);
        return curator.getStat(path)
                      .map(stat -> cachedHistoricRuns.compute(path, (__, old) ->
                              old != null && old.getFirst() == stat.getVersion()
                              ? old
                              : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond())
                      .orElseGet(Collections::emptyNavigableMap);
    }

    public void deleteRunData(ApplicationId id, JobType type) {
        curator.delete(runsPath(id, type));
        curator.delete(lastRunPath(id, type));
    }

    public void deleteRunData(ApplicationId id) {
        curator.delete(jobRoot.append(id.serializedForm()));
    }

    public List<ApplicationId> applicationsWithJobs() {
        return curator.getChildren(jobRoot).stream()
                      .map(ApplicationId::fromSerializedForm)
                      .collect(Collectors.toList());
    }

    public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) {
        return curator.getData(logPath(id, type, chunkId));
    }

    public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) {
        curator.set(logPath(id, type, chunkId), log);
    }

    public void deleteLog(ApplicationId id, JobType type) {
        curator.delete(runsPath(id, type).append("logs"));
    }

    public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) {
        return curator.getData(lastLogPath(id, type))
                      .map(String::new).map(Long::parseLong);
    }

    public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) {
        curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes());
    }

    public LongStream getLogChunkIds(ApplicationId id, JobType type) {
        return curator.getChildren(runsPath(id, type).append("logs")).stream()
                      .mapToLong(Long::parseLong)
                      .sorted();
    }

    // -------------- Audit log and name service queue ------------------------

    public AuditLog readAuditLog() {
        return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime)
                                        .orElse(AuditLog.empty);
    }

    public void writeAuditLog(AuditLog log) {
        curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log)));
    }

    public NameServiceQueue readNameServiceQueue() {
        return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime)
                                                .orElse(NameServiceQueue.EMPTY);
    }

    public void writeNameServiceQueue(NameServiceQueue queue) {
        curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue)));
    }

    // -------------- Provisioning --------------------------------------------

    @SuppressWarnings("unused")
    public Optional<byte[]> readProvisionState(String provisionId) {
        return curator.getData(provisionStatePath(provisionId));
    }

    @SuppressWarnings("unused")
    public void writeProvisionState(String provisionId, byte[] data) {
        curator.set(provisionStatePath(provisionId), data);
    }

    @SuppressWarnings("unused")
    public List<String> readProvisionStateIds() {
        return curator.getChildren(provisionStatePath());
    }

    // -------------- Routing policies ----------------------------------------

    public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) {
        // All policies must belong to the application whose node is being written.
        for (var policy : policies) {
            if (!policy.id().owner().equals(application)) {
                throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " + application.toShortString());
            }
        }
        curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies)));
    }

    public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() {
        return readRoutingPolicies((instance) -> true);
    }

    public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) {
        return curator.getChildren(routingPoliciesRoot).stream()
                      .map(ApplicationId::fromSerializedForm)
                      .filter(filter)
                      .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies));
    }

    public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) {
        return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime))
                                                        .orElseGet(List::of);
    }

    public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) {
        curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy)));
    }

    public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) {
        return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data))
                                                     .orElseGet(() -> new ZoneRoutingPolicy(zone, RoutingStatus.DEFAULT));
    }

    // -------------- Endpoint certificates -----------------------------------

    public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) {
        curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata)));
    }

    public void deleteEndpointCertificateMetadata(ApplicationId applicationId) {
        curator.delete(endpointCertificatePath(applicationId));
    }

    public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) {
        return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString);
    }

    /** Reads metadata for all stored certificates; throws if a listed child has no readable metadata. */
    public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() {
        Map<ApplicationId, EndpointCertificateMetadata> metadataByApplication = new HashMap<>();
        for (String serialized : curator.getChildren(endpointCertificateRoot)) {
            ApplicationId applicationId = ApplicationId.fromSerializedForm(serialized);
            Optional<EndpointCertificateMetadata> metadata = readEndpointCertificateMetadata(applicationId);
            metadataByApplication.put(applicationId, metadata.orElseThrow());
        }
        return metadataByApplication;
    }

    // -------------- Metering, archive buckets, change requests --------------

    public void writeMeteringRefreshTime(long timestamp) {
        curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes());
    }

    public long readMeteringRefreshTime() {
        return curator.getData(meteringRefreshPath())
                      .map(String::new).map(Long::parseLong)
                      .orElse(0L);
    }

    public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) {
        return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString)
                      .orElseGet(Set::of);
    }

    public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) {
        curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets)));
    }

    public Optional<VespaChangeRequest> readChangeRequest(String changeRequestId) {
        return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime);
    }

    public List<VespaChangeRequest> readChangeRequests() {
        return curator.getChildren(changeRequestsRoot)
                      .stream()
                      .map(this::readChangeRequest)
                      .flatMap(Optional::stream)
                      .collect(Collectors.toList());
    }

    public void writeChangeRequest(VespaChangeRequest changeRequest) {
        curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest)));
    }

    public void deleteChangeRequest(VespaChangeRequest changeRequest) {
        curator.delete(changeRequestPath(changeRequest.getId()));
    }

    // -------------- Notifications, support access, retrigger queue ----------

    public List<Notification> readNotifications(TenantName tenantName) {
        return readSlime(notificationsPath(tenantName))
                .map(slime -> notificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of);
    }

    public List<TenantName> listTenantsWithNotifications() {
        return curator.getChildren(notificationsRoot).stream()
                      .map(TenantName::from)
                      .collect(Collectors.toUnmodifiableList());
    }

    public void writeNotifications(TenantName tenantName, List<Notification> notifications) {
        curator.set(notificationsPath(tenantName), asJson(notificationsSerializer.toSlime(notifications)));
    }

    public void deleteNotifications(TenantName tenantName) {
        curator.delete(notificationsPath(tenantName));
    }

    public SupportAccess readSupportAccess(DeploymentId deploymentId) {
        return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY);
    }

    /** Take the support access lock before reading, and before writing. */
    public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) {
        curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess)));
    }

    public List<RetriggerEntry> readRetriggerEntries() {
        return readSlime(deploymentRetriggerPath()).map(retriggerEntrySerializer::fromSlime).orElseGet(List::of);
    }

    public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) {
        curator.set(deploymentRetriggerPath(), asJson(retriggerEntrySerializer.toSlime(retriggerEntries)));
    }

    // -------------- Lock paths ----------------------------------------------

    private Path lockPath(TenantName tenant) {
        return lockRoot.append(tenant.value());
    }

    private Path lockPath(TenantAndApplicationId application) {
        return lockRoot.append(application.tenant().value() + ":" + application.application().value());
    }

    private Path lockPath(ApplicationId instance, ZoneId zone) {
        return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value());
    }

    private Path lockPath(ApplicationId instance, JobType type) {
        return lockRoot.append(instance.serializedForm() + ":" + type.jobName());
    }

    private Path lockPath(ApplicationId instance, JobType type, Step step) {
        return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name());
    }

    private Path lockPath(String provisionId) {
        return lockRoot.append(provisionStatePath()).append(provisionId);
    }

    // -------------- Data paths ----------------------------------------------

    private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); }

    private static Path targetMajorVersionPath() { return root.append("upgrader").append("targetMajorVersion"); }

    private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); }

    private static Path osVersionTargetsPath() { return root.append("osUpgrader").append("targetVersion"); }

    private static Path osVersionStatusPath() { return root.append("osVersionStatus"); }

    private static Path versionStatusPath() { return root.append("versionStatus"); }

    private static Path routingPolicyPath(ApplicationId application) { return routingPoliciesRoot.append(application.serializedForm()); }

    private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); }

    private static Path nameServiceQueuePath() { return root.append("nameServiceQueue"); }

    private static Path auditLogPath() { return root.append("auditLog"); }

    private static Path provisionStatePath() { return root.append("provisioning").append("states"); }

    private static Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); }

    private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); }

    private static Path applicationPath(TenantAndApplicationId id) { return applicationRoot.append(id.serialized()); }

    private static Path runsPath(ApplicationId id, JobType type) { return jobRoot.append(id.serializedForm()).append(type.jobName()); }

    private static Path lastRunPath(ApplicationId id, JobType type) { return runsPath(id, type).append("last"); }

    private static Path logPath(ApplicationId id, JobType type, long first) { return runsPath(id, type).append("logs").append(Long.toString(first)); }

    // NOTE(review): points at the same "logs" node as the log-chunk parent; last-entry id is stored as that node's data.
    private static Path lastLogPath(ApplicationId id, JobType type) { return runsPath(id, type).append("logs"); }

    private static Path controllerPath(String hostname) { return controllerRoot.append(hostname); }

    private static Path endpointCertificatePath(ApplicationId id) { return endpointCertificateRoot.append(id.serializedForm()); }

    private static Path meteringRefreshPath() { return root.append("meteringRefreshTime"); }

    private static Path archiveBucketsPath(ZoneId zoneId) { return archiveBucketsRoot.append(zoneId.value()); }

    private static Path changeRequestPath(String id) { return changeRequestsRoot.append(id); }

    private static Path notificationsPath(TenantName tenantName) { return notificationsRoot.append(tenantName.value()); }

    private static Path supportAccessPath(DeploymentId deploymentId) { return supportAccessRoot.append(deploymentId.dottedString()); }

    private static Path deploymentRetriggerPath() { return root.append("deploymentRetriggerQueue"); }

}
class CuratorDb { private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Duration deployLockTimeout = Duration.ofMinutes(30); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1); private static final Path root = Path.fromString("/controller/v1"); private static final Path lockRoot = root.append("locks"); private static final Path tenantRoot = root.append("tenants"); private static final Path applicationRoot = root.append("applications"); private static final Path jobRoot = root.append("jobs"); private static final Path controllerRoot = root.append("controllers"); private static final Path routingPoliciesRoot = root.append("routingPolicies"); private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies"); private static final Path endpointCertificateRoot = root.append("applicationCertificates"); private static final Path archiveBucketsRoot = root.append("archiveBuckets"); private static final Path changeRequestsRoot = root.append("changeRequests"); private static final Path notificationsRoot = root.append("notifications"); private static final Path supportAccessRoot = root.append("supportAccess"); private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer(); private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer); private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer(); private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer(); private final TenantSerializer tenantSerializer = new TenantSerializer(); private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer(); private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer); private final 
OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer); private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer(); private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer); private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer(); private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer(); private final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private final RunSerializer runSerializer = new RunSerializer(); private final RetriggerEntrySerializer retriggerEntrySerializer = new RetriggerEntrySerializer(); private final NotificationsSerializer notificationsSerializer = new NotificationsSerializer(); private final Curator curator; private final Duration tryLockTimeout; private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>(); private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>(); @Inject public CuratorDb(Curator curator, ServiceRegistry services) { this(curator, defaultTryLockTimeout, services.zoneRegistry().system()); } CuratorDb(Curator curator, Duration tryLockTimeout, SystemName system) { this.curator = curator; this.tryLockTimeout = tryLockTimeout; } /** Returns all hostnames configured to be part of this ZooKeeper cluster */ public List<String> cluster() { return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(",")) .filter(hostAndPort -> !hostAndPort.isEmpty()) .map(hostAndPort -> hostAndPort.split(":")[0]) .collect(Collectors.toUnmodifiableList()); } public Mutex lock(TenantName name) { return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2)); } public Mutex lock(TenantAndApplicationId id) { return curator.lock(lockPath(id), 
defaultLockTimeout.multipliedBy(2)); } public Mutex lockForDeployment(ApplicationId id, ZoneId zone) { return curator.lock(lockPath(id, zone), deployLockTimeout); } public Mutex lock(ApplicationId id, JobType type) { return curator.lock(lockPath(id, type), defaultLockTimeout); } public Mutex lock(ApplicationId id, JobType type, Step step) throws TimeoutException { return tryLock(lockPath(id, type, step)); } public Mutex lockRotations() { return curator.lock(lockRoot.append("rotations"), defaultLockTimeout); } public Mutex lockConfidenceOverrides() { return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout); } public Mutex lockMaintenanceJob(String jobName) { try { return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName)); } catch (TimeoutException e) { throw new UncheckedTimeoutException(e); } } public Mutex lockProvisionState(String provisionStateId) { return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Mutex lockOsVersions() { return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout); } public Mutex lockOsVersionStatus() { return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout); } public Mutex lockRoutingPolicies() { return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout); } public Mutex lockAuditLog() { return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout); } public Mutex lockNameServiceQueue() { return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout); } public Mutex lockMeteringRefreshTime() throws TimeoutException { return tryLock(lockRoot.append("meteringRefreshTime")); } public Mutex lockArchiveBuckets(ZoneId zoneId) { return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout); } public Mutex lockChangeRequests() { return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout); } public Mutex lockNotifications(TenantName tenantName) { return 
curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout); } public Mutex lockSupportAccess(DeploymentId deploymentId) { return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout); } public Mutex lockDeploymentRetriggerQueue() { return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout); } /** Try locking with a low timeout, meaning it is OK to fail lock acquisition. * * Useful for maintenance jobs, where there is no point in running the jobs back to back. */ private Mutex tryLock(Path path) throws TimeoutException { try { return curator.lock(path, tryLockTimeout); } catch (UncheckedTimeoutException e) { throw new TimeoutException(e.getMessage()); } } private <T> Optional<T> read(Path path, Function<byte[], T> mapper) { return curator.getData(path).filter(data -> data.length > 0).map(mapper); } private Optional<Slime> readSlime(Path path) { return read(path, SlimeUtils::jsonToSlime); } private static byte[] asJson(Slime slime) { try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } public double readUpgradesPerMinute() { return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125); } public void writeUpgradesPerMinute(double n) { curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); } public VersionStatus readVersionStatus() { return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty); } public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) { curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides))); } public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() { return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime) .orElseGet(Collections::emptyMap); } public 
void writeControllerVersion(HostName hostname, ControllerVersion version) { curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version))); } public ControllerVersion readControllerVersion(HostName hostname) { return readSlime(controllerPath(hostname.value())) .map(controllerVersionSerializer::fromSlime) .orElse(ControllerVersion.CURRENT); } public void writeOsVersionTargets(Set<OsVersionTarget> versions) { curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions))); } public Set<OsVersionTarget> readOsVersionTargets() { return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet); } public void writeOsVersionStatus(OsVersionStatus status) { curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status))); } public OsVersionStatus readOsVersionStatus() { return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty); } public void writeTenant(Tenant tenant) { curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant))); } public Optional<Tenant> readTenant(TenantName name) { return readSlime(tenantPath(name)).map(tenantSerializer::tenantFrom); } public List<Tenant> readTenants() { return readTenantNames().stream() .map(this::readTenant) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } public List<TenantName> readTenantNames() { return curator.getChildren(tenantRoot).stream() .map(TenantName::from) .collect(Collectors.toList()); } public void removeTenant(TenantName name) { curator.delete(tenantPath(name)); } public void writeApplication(Application application) { curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application))); } public Optional<Application> readApplication(TenantAndApplicationId application) { Path path = applicationPath(application); return 
curator.getStat(path) .map(stat -> cachedApplications.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), read(path, bytes -> applicationSerializer.fromSlime(bytes)).get())).getSecond()); } public List<Application> readApplications(boolean canFail) { return readApplications(ignored -> true, canFail); } public List<Application> readApplications(TenantName name) { return readApplications(application -> application.tenant().equals(name), false); } private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) { var applicationIds = readApplicationIds(); var applications = new ArrayList<Application>(applicationIds.size()); for (var id : applicationIds) { if (!applicationFilter.test(id)) continue; try { readApplication(id).ifPresent(applications::add); } catch (Exception e) { if (canFail) { log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " + "manual intervention", e); } else { throw e; } } } return Collections.unmodifiableList(applications); } public List<TenantAndApplicationId> readApplicationIds() { return curator.getChildren(applicationRoot).stream() .map(TenantAndApplicationId::fromSerialized) .sorted() .collect(toUnmodifiableList()); } public void removeApplication(TenantAndApplicationId id) { curator.delete(applicationPath(id)); } public void writeLastRun(Run run) { curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run))); } public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) { Path path = runsPath(id, type); curator.set(path, asJson(runSerializer.toSlime(runs))); } public Optional<Run> readLastRun(ApplicationId id, JobType type) { return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime); } public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) { Path path = runsPath(id, type); return 
curator.getStat(path) .map(stat -> cachedHistoricRuns.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond()) .orElseGet(Collections::emptyNavigableMap); } public void deleteRunData(ApplicationId id, JobType type) { curator.delete(runsPath(id, type)); curator.delete(lastRunPath(id, type)); } public void deleteRunData(ApplicationId id) { curator.delete(jobRoot.append(id.serializedForm())); } public List<ApplicationId> applicationsWithJobs() { return curator.getChildren(jobRoot).stream() .map(ApplicationId::fromSerializedForm) .collect(Collectors.toList()); } public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) { return curator.getData(logPath(id, type, chunkId)); } public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) { curator.set(logPath(id, type, chunkId), log); } public void deleteLog(ApplicationId id, JobType type) { curator.delete(runsPath(id, type).append("logs")); } public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) { return curator.getData(lastLogPath(id, type)) .map(String::new).map(Long::parseLong); } public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) { curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes()); } public LongStream getLogChunkIds(ApplicationId id, JobType type) { return curator.getChildren(runsPath(id, type).append("logs")).stream() .mapToLong(Long::parseLong) .sorted(); } public AuditLog readAuditLog() { return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime) .orElse(AuditLog.empty); } public void writeAuditLog(AuditLog log) { curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log))); } public NameServiceQueue readNameServiceQueue() { return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime) .orElse(NameServiceQueue.EMPTY); } public void 
writeNameServiceQueue(NameServiceQueue queue) { curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue))); } @SuppressWarnings("unused") public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } @SuppressWarnings("unused") public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } @SuppressWarnings("unused") public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) { for (var policy : policies) { if (!policy.id().owner().equals(application)) { throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " + application.toShortString()); } } curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies))); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() { return readRoutingPolicies((instance) -> true); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) { return curator.getChildren(routingPoliciesRoot).stream() .map(ApplicationId::fromSerializedForm) .filter(filter) .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies)); } public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) { return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime)) .orElseGet(List::of); } public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) { curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy))); } public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) { return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data)) .orElseGet(() -> new ZoneRoutingPolicy(zone, 
RoutingStatus.DEFAULT)); } public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) { curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata))); } public void deleteEndpointCertificateMetadata(ApplicationId applicationId) { curator.delete(endpointCertificatePath(applicationId)); } public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) { return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString); } public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() { Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>(); for (String appIdString : curator.getChildren(endpointCertificateRoot)) { ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString); Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId); allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow()); } return allEndpointCertificateMetadata; } public void writeMeteringRefreshTime(long timestamp) { curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes()); } public long readMeteringRefreshTime() { return curator.getData(meteringRefreshPath()) .map(String::new).map(Long::parseLong) .orElse(0L); } public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) { return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString) .orElseGet(Set::of); } public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) { curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets))); } public Optional<VespaChangeRequest> readChangeRequest(String 
changeRequestId) { return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime); } public List<VespaChangeRequest> readChangeRequests() { return curator.getChildren(changeRequestsRoot) .stream() .map(this::readChangeRequest) .flatMap(Optional::stream) .collect(Collectors.toList()); } public void writeChangeRequest(VespaChangeRequest changeRequest) { curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest))); } public void deleteChangeRequest(VespaChangeRequest changeRequest) { curator.delete(changeRequestPath(changeRequest.getId())); } public List<Notification> readNotifications(TenantName tenantName) { return readSlime(notificationsPath(tenantName)) .map(slime -> notificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of); } public List<TenantName> listTenantsWithNotifications() { return curator.getChildren(notificationsRoot).stream() .map(TenantName::from) .collect(Collectors.toUnmodifiableList()); } public void writeNotifications(TenantName tenantName, List<Notification> notifications) { curator.set(notificationsPath(tenantName), asJson(notificationsSerializer.toSlime(notifications))); } public void deleteNotifications(TenantName tenantName) { curator.delete(notificationsPath(tenantName)); } public SupportAccess readSupportAccess(DeploymentId deploymentId) { return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY); } /** Take lock before reading before writing */ public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) { curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess))); } public List<RetriggerEntry> readRetriggerEntries() { return readSlime(deploymentRetriggerPath()).map(retriggerEntrySerializer::fromSlime).orElseGet(List::of); } public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) { 
curator.set(deploymentRetriggerPath(), asJson(retriggerEntrySerializer.toSlime(retriggerEntries))); } private Path lockPath(TenantName tenant) { return lockRoot .append(tenant.value()); } private Path lockPath(TenantAndApplicationId application) { return lockRoot.append(application.tenant().value() + ":" + application.application().value()); } private Path lockPath(ApplicationId instance, ZoneId zone) { return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value()); } private Path lockPath(ApplicationId instance, JobType type) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName()); } private Path lockPath(ApplicationId instance, JobType type, Step step) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name()); } private Path lockPath(String provisionId) { return lockRoot .append(provisionStatePath()) .append(provisionId); } private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); } private static Path osVersionTargetsPath() { return root.append("osUpgrader").append("targetVersion"); } private static Path osVersionStatusPath() { return root.append("osVersionStatus"); } private static Path versionStatusPath() { return root.append("versionStatus"); } private static Path routingPolicyPath(ApplicationId application) { return routingPoliciesRoot.append(application.serializedForm()); } private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); } private static Path nameServiceQueuePath() { return root.append("nameServiceQueue"); } private static Path auditLogPath() { return root.append("auditLog"); } private static Path provisionStatePath() { return root.append("provisioning").append("states"); } private static Path provisionStatePath(String provisionId) { 
return provisionStatePath().append(provisionId); } private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); } private static Path applicationPath(TenantAndApplicationId id) { return applicationRoot.append(id.serialized()); } private static Path runsPath(ApplicationId id, JobType type) { return jobRoot.append(id.serializedForm()).append(type.jobName()); } private static Path lastRunPath(ApplicationId id, JobType type) { return runsPath(id, type).append("last"); } private static Path logPath(ApplicationId id, JobType type, long first) { return runsPath(id, type).append("logs").append(Long.toString(first)); } private static Path lastLogPath(ApplicationId id, JobType type) { return runsPath(id, type).append("logs"); } private static Path controllerPath(String hostname) { return controllerRoot.append(hostname); } private static Path endpointCertificatePath(ApplicationId id) { return endpointCertificateRoot.append(id.serializedForm()); } private static Path meteringRefreshPath() { return root.append("meteringRefreshTime"); } private static Path archiveBucketsPath(ZoneId zoneId) { return archiveBucketsRoot.append(zoneId.value()); } private static Path changeRequestPath(String id) { return changeRequestsRoot.append(id); } private static Path notificationsPath(TenantName tenantName) { return notificationsRoot.append(tenantName.value()); } private static Path supportAccessPath(DeploymentId deploymentId) { return supportAccessRoot.append(deploymentId.dottedString()); } private static Path deploymentRetriggerPath() { return root.append("deploymentRetriggerQueue"); } }
Typo
public void clearTargetMajor() { curator.delete(root.append("ugprader").append("targetMajorVersion")); }
curator.delete(root.append("ugprader").append("targetMajorVersion"));
public void clearTargetMajor() { curator.delete(root.append("upgrader").append("targetMajorVersion")); }
class CuratorDb { private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Duration deployLockTimeout = Duration.ofMinutes(30); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1); private static final Path root = Path.fromString("/controller/v1"); private static final Path lockRoot = root.append("locks"); private static final Path tenantRoot = root.append("tenants"); private static final Path applicationRoot = root.append("applications"); private static final Path jobRoot = root.append("jobs"); private static final Path controllerRoot = root.append("controllers"); private static final Path routingPoliciesRoot = root.append("routingPolicies"); private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies"); private static final Path endpointCertificateRoot = root.append("applicationCertificates"); private static final Path archiveBucketsRoot = root.append("archiveBuckets"); private static final Path changeRequestsRoot = root.append("changeRequests"); private static final Path notificationsRoot = root.append("notifications"); private static final Path supportAccessRoot = root.append("supportAccess"); private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer(); private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer); private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer(); private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer(); private final TenantSerializer tenantSerializer = new TenantSerializer(); private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer(); private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer); private final 
OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer); private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer(); private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer); private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer(); private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer(); private final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private final RunSerializer runSerializer = new RunSerializer(); private final RetriggerEntrySerializer retriggerEntrySerializer = new RetriggerEntrySerializer(); private final NotificationsSerializer notificationsSerializer = new NotificationsSerializer(); private final Curator curator; private final Duration tryLockTimeout; private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>(); private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>(); @Inject public CuratorDb(Curator curator, ServiceRegistry services) { this(curator, defaultTryLockTimeout, services.zoneRegistry().system()); } CuratorDb(Curator curator, Duration tryLockTimeout, SystemName system) { this.curator = curator; this.tryLockTimeout = tryLockTimeout; } /** Returns all hostnames configured to be part of this ZooKeeper cluster */ public List<String> cluster() { return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(",")) .filter(hostAndPort -> !hostAndPort.isEmpty()) .map(hostAndPort -> hostAndPort.split(":")[0]) .collect(Collectors.toUnmodifiableList()); } public Mutex lock(TenantName name) { return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2)); } public Mutex lock(TenantAndApplicationId id) { return curator.lock(lockPath(id), 
defaultLockTimeout.multipliedBy(2)); } public Mutex lockForDeployment(ApplicationId id, ZoneId zone) { return curator.lock(lockPath(id, zone), deployLockTimeout); } public Mutex lock(ApplicationId id, JobType type) { return curator.lock(lockPath(id, type), defaultLockTimeout); } public Mutex lock(ApplicationId id, JobType type, Step step) throws TimeoutException { return tryLock(lockPath(id, type, step)); } public Mutex lockRotations() { return curator.lock(lockRoot.append("rotations"), defaultLockTimeout); } public Mutex lockConfidenceOverrides() { return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout); } public Mutex lockMaintenanceJob(String jobName) { try { return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName)); } catch (TimeoutException e) { throw new UncheckedTimeoutException(e); } } public Mutex lockProvisionState(String provisionStateId) { return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Mutex lockOsVersions() { return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout); } public Mutex lockOsVersionStatus() { return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout); } public Mutex lockRoutingPolicies() { return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout); } public Mutex lockAuditLog() { return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout); } public Mutex lockNameServiceQueue() { return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout); } public Mutex lockMeteringRefreshTime() throws TimeoutException { return tryLock(lockRoot.append("meteringRefreshTime")); } public Mutex lockArchiveBuckets(ZoneId zoneId) { return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout); } public Mutex lockChangeRequests() { return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout); } public Mutex lockNotifications(TenantName tenantName) { return 
curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout); } public Mutex lockSupportAccess(DeploymentId deploymentId) { return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout); } public Mutex lockDeploymentRetriggerQueue() { return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout); } /** Try locking with a low timeout, meaning it is OK to fail lock acquisition. * * Useful for maintenance jobs, where there is no point in running the jobs back to back. */ private Mutex tryLock(Path path) throws TimeoutException { try { return curator.lock(path, tryLockTimeout); } catch (UncheckedTimeoutException e) { throw new TimeoutException(e.getMessage()); } } private <T> Optional<T> read(Path path, Function<byte[], T> mapper) { return curator.getData(path).filter(data -> data.length > 0).map(mapper); } private Optional<Slime> readSlime(Path path) { return read(path, SlimeUtils::jsonToSlime); } private static byte[] asJson(Slime slime) { try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } public double readUpgradesPerMinute() { return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125); } public void writeUpgradesPerMinute(double n) { curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); } public void writeVersionStatus(VersionStatus status) { curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status))); } public VersionStatus readVersionStatus() { return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty); } public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) { curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides))); } public Map<Version, VespaVersion.Confidence> 
readConfidenceOverrides() { return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime) .orElseGet(Collections::emptyMap); } public void writeControllerVersion(HostName hostname, ControllerVersion version) { curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version))); } public ControllerVersion readControllerVersion(HostName hostname) { return readSlime(controllerPath(hostname.value())) .map(controllerVersionSerializer::fromSlime) .orElse(ControllerVersion.CURRENT); } public void writeOsVersionTargets(Set<OsVersionTarget> versions) { curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions))); } public Set<OsVersionTarget> readOsVersionTargets() { return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet); } public void writeOsVersionStatus(OsVersionStatus status) { curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status))); } public OsVersionStatus readOsVersionStatus() { return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty); } public void writeTenant(Tenant tenant) { curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant))); } public Optional<Tenant> readTenant(TenantName name) { return readSlime(tenantPath(name)).map(tenantSerializer::tenantFrom); } public List<Tenant> readTenants() { return readTenantNames().stream() .map(this::readTenant) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } public List<TenantName> readTenantNames() { return curator.getChildren(tenantRoot).stream() .map(TenantName::from) .collect(Collectors.toList()); } public void removeTenant(TenantName name) { curator.delete(tenantPath(name)); } public void writeApplication(Application application) { curator.set(applicationPath(application.id()), 
asJson(applicationSerializer.toSlime(application))); } public Optional<Application> readApplication(TenantAndApplicationId application) { Path path = applicationPath(application); return curator.getStat(path) .map(stat -> cachedApplications.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), read(path, bytes -> applicationSerializer.fromSlime(bytes)).get())).getSecond()); } public List<Application> readApplications(boolean canFail) { return readApplications(ignored -> true, canFail); } public List<Application> readApplications(TenantName name) { return readApplications(application -> application.tenant().equals(name), false); } private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) { var applicationIds = readApplicationIds(); var applications = new ArrayList<Application>(applicationIds.size()); for (var id : applicationIds) { if (!applicationFilter.test(id)) continue; try { readApplication(id).ifPresent(applications::add); } catch (Exception e) { if (canFail) { log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " + "manual intervention", e); } else { throw e; } } } return Collections.unmodifiableList(applications); } public List<TenantAndApplicationId> readApplicationIds() { return curator.getChildren(applicationRoot).stream() .map(TenantAndApplicationId::fromSerialized) .sorted() .collect(toUnmodifiableList()); } public void removeApplication(TenantAndApplicationId id) { curator.delete(applicationPath(id)); } public void writeLastRun(Run run) { curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run))); } public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) { Path path = runsPath(id, type); curator.set(path, asJson(runSerializer.toSlime(runs))); } public Optional<Run> readLastRun(ApplicationId id, JobType type) { return 
readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime); } public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) { Path path = runsPath(id, type); return curator.getStat(path) .map(stat -> cachedHistoricRuns.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond()) .orElseGet(Collections::emptyNavigableMap); } public void deleteRunData(ApplicationId id, JobType type) { curator.delete(runsPath(id, type)); curator.delete(lastRunPath(id, type)); } public void deleteRunData(ApplicationId id) { curator.delete(jobRoot.append(id.serializedForm())); } public List<ApplicationId> applicationsWithJobs() { return curator.getChildren(jobRoot).stream() .map(ApplicationId::fromSerializedForm) .collect(Collectors.toList()); } public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) { return curator.getData(logPath(id, type, chunkId)); } public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) { curator.set(logPath(id, type, chunkId), log); } public void deleteLog(ApplicationId id, JobType type) { curator.delete(runsPath(id, type).append("logs")); } public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) { return curator.getData(lastLogPath(id, type)) .map(String::new).map(Long::parseLong); } public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) { curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes()); } public LongStream getLogChunkIds(ApplicationId id, JobType type) { return curator.getChildren(runsPath(id, type).append("logs")).stream() .mapToLong(Long::parseLong) .sorted(); } public AuditLog readAuditLog() { return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime) .orElse(AuditLog.empty); } public void writeAuditLog(AuditLog log) { curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log))); } public 
NameServiceQueue readNameServiceQueue() { return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime) .orElse(NameServiceQueue.EMPTY); } public void writeNameServiceQueue(NameServiceQueue queue) { curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue))); } @SuppressWarnings("unused") public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } @SuppressWarnings("unused") public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } @SuppressWarnings("unused") public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) { for (var policy : policies) { if (!policy.id().owner().equals(application)) { throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " + application.toShortString()); } } curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies))); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() { return readRoutingPolicies((instance) -> true); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) { return curator.getChildren(routingPoliciesRoot).stream() .map(ApplicationId::fromSerializedForm) .filter(filter) .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies)); } public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) { return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime)) .orElseGet(List::of); } public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) { curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy))); } public ZoneRoutingPolicy 
readZoneRoutingPolicy(ZoneId zone) { return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data)) .orElseGet(() -> new ZoneRoutingPolicy(zone, RoutingStatus.DEFAULT)); } public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) { curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata))); } public void deleteEndpointCertificateMetadata(ApplicationId applicationId) { curator.delete(endpointCertificatePath(applicationId)); } public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) { return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString); } public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() { Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>(); for (String appIdString : curator.getChildren(endpointCertificateRoot)) { ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString); Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId); allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow()); } return allEndpointCertificateMetadata; } public void writeMeteringRefreshTime(long timestamp) { curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes()); } public long readMeteringRefreshTime() { return curator.getData(meteringRefreshPath()) .map(String::new).map(Long::parseLong) .orElse(0L); } public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) { return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString) .orElseGet(Set::of); } public void writeArchiveBuckets(ZoneId zoneid, 
Set<ArchiveBucket> archiveBuckets) { curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets))); } public Optional<VespaChangeRequest> readChangeRequest(String changeRequestId) { return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime); } public List<VespaChangeRequest> readChangeRequests() { return curator.getChildren(changeRequestsRoot) .stream() .map(this::readChangeRequest) .flatMap(Optional::stream) .collect(Collectors.toList()); } public void writeChangeRequest(VespaChangeRequest changeRequest) { curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest))); } public void deleteChangeRequest(VespaChangeRequest changeRequest) { curator.delete(changeRequestPath(changeRequest.getId())); } public List<Notification> readNotifications(TenantName tenantName) { return readSlime(notificationsPath(tenantName)) .map(slime -> notificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of); } public List<TenantName> listTenantsWithNotifications() { return curator.getChildren(notificationsRoot).stream() .map(TenantName::from) .collect(Collectors.toUnmodifiableList()); } public void writeNotifications(TenantName tenantName, List<Notification> notifications) { curator.set(notificationsPath(tenantName), asJson(notificationsSerializer.toSlime(notifications))); } public void deleteNotifications(TenantName tenantName) { curator.delete(notificationsPath(tenantName)); } public SupportAccess readSupportAccess(DeploymentId deploymentId) { return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY); } /** Take lock before reading before writing */ public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) { curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess))); } public List<RetriggerEntry> 
readRetriggerEntries() { return readSlime(deploymentRetriggerPath()).map(retriggerEntrySerializer::fromSlime).orElseGet(List::of); } public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) { curator.set(deploymentRetriggerPath(), asJson(retriggerEntrySerializer.toSlime(retriggerEntries))); } private Path lockPath(TenantName tenant) { return lockRoot .append(tenant.value()); } private Path lockPath(TenantAndApplicationId application) { return lockRoot.append(application.tenant().value() + ":" + application.application().value()); } private Path lockPath(ApplicationId instance, ZoneId zone) { return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value()); } private Path lockPath(ApplicationId instance, JobType type) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName()); } private Path lockPath(ApplicationId instance, JobType type, Step step) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name()); } private Path lockPath(String provisionId) { return lockRoot .append(provisionStatePath()) .append(provisionId); } private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); } private static Path osVersionTargetsPath() { return root.append("osUpgrader").append("targetVersion"); } private static Path osVersionStatusPath() { return root.append("osVersionStatus"); } private static Path versionStatusPath() { return root.append("versionStatus"); } private static Path routingPolicyPath(ApplicationId application) { return routingPoliciesRoot.append(application.serializedForm()); } private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); } private static Path nameServiceQueuePath() { return root.append("nameServiceQueue"); } private static Path 
auditLogPath() { return root.append("auditLog"); } private static Path provisionStatePath() { return root.append("provisioning").append("states"); } private static Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); } private static Path applicationPath(TenantAndApplicationId id) { return applicationRoot.append(id.serialized()); } private static Path runsPath(ApplicationId id, JobType type) { return jobRoot.append(id.serializedForm()).append(type.jobName()); } private static Path lastRunPath(ApplicationId id, JobType type) { return runsPath(id, type).append("last"); } private static Path logPath(ApplicationId id, JobType type, long first) { return runsPath(id, type).append("logs").append(Long.toString(first)); } private static Path lastLogPath(ApplicationId id, JobType type) { return runsPath(id, type).append("logs"); } private static Path controllerPath(String hostname) { return controllerRoot.append(hostname); } private static Path endpointCertificatePath(ApplicationId id) { return endpointCertificateRoot.append(id.serializedForm()); } private static Path meteringRefreshPath() { return root.append("meteringRefreshTime"); } private static Path archiveBucketsPath(ZoneId zoneId) { return archiveBucketsRoot.append(zoneId.value()); } private static Path changeRequestPath(String id) { return changeRequestsRoot.append(id); } private static Path notificationsPath(TenantName tenantName) { return notificationsRoot.append(tenantName.value()); } private static Path supportAccessPath(DeploymentId deploymentId) { return supportAccessRoot.append(deploymentId.dottedString()); } private static Path deploymentRetriggerPath() { return root.append("deploymentRetriggerQueue"); } }
/**
 * Controller-side persistence layer on top of ZooKeeper (via {@code Curator}).
 *
 * <p>All controller state — tenants, applications, job runs, routing policies,
 * certificates, locks, etc. — lives under the {@code /controller/v1} root. Each
 * datatype gets its own sub-path and a dedicated serializer that maps it to JSON
 * (Slime) bytes. Reads of hot, large entries (applications, historic runs) are
 * cached in-memory keyed by the ZooKeeper stat version, so a cached value is
 * reused only while the stored node is unchanged.
 */
class CuratorDb {

    private static final Logger log = Logger.getLogger(CuratorDb.class.getName());

    // Lock timeouts: deployments may legitimately hold their lock for a long time;
    // everything else uses a short default. "Try" locks give up almost immediately.
    private static final Duration deployLockTimeout = Duration.ofMinutes(30);
    private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
    private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1);

    // ZooKeeper path layout: everything is rooted under /controller/v1.
    private static final Path root = Path.fromString("/controller/v1");
    private static final Path lockRoot = root.append("locks");
    private static final Path tenantRoot = root.append("tenants");
    private static final Path applicationRoot = root.append("applications");
    private static final Path jobRoot = root.append("jobs");
    private static final Path controllerRoot = root.append("controllers");
    private static final Path routingPoliciesRoot = root.append("routingPolicies");
    private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies");
    private static final Path endpointCertificateRoot = root.append("applicationCertificates");
    private static final Path archiveBucketsRoot = root.append("archiveBuckets");
    private static final Path changeRequestsRoot = root.append("changeRequests");
    private static final Path notificationsRoot = root.append("notifications");
    private static final Path supportAccessRoot = root.append("supportAccess");

    // One stateless serializer per stored datatype; some compose others
    // (e.g. version status embeds node versions).
    private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer();
    private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer);
    private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer();
    private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
    private final TenantSerializer tenantSerializer = new TenantSerializer();
    private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer();
    private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer);
    private final OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer);
    private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer();
    private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer);
    private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer();
    private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer();
    private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
    private final RunSerializer runSerializer = new RunSerializer();
    private final RetriggerEntrySerializer retriggerEntrySerializer = new RetriggerEntrySerializer();
    private final NotificationsSerializer notificationsSerializer = new NotificationsSerializer();

    private final Curator curator;
    private final Duration tryLockTimeout;

    // Read caches keyed by ZK path; the pair holds (stat version, deserialized value),
    // so a cached entry is reused only while the stored node's version is unchanged.
    private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>();
    private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>();

    @Inject
    public CuratorDb(Curator curator, ServiceRegistry services) {
        this(curator, defaultTryLockTimeout, services.zoneRegistry().system());
    }

    // NOTE(review): the 'system' parameter is accepted but never stored or used —
    // confirm whether it is a leftover or kept for constructor-signature stability.
    CuratorDb(Curator curator, Duration tryLockTimeout, SystemName system) {
        this.curator = curator;
        this.tryLockTimeout = tryLockTimeout;
    }

    /** Returns all hostnames configured to be part of this ZooKeeper cluster */
    public List<String> cluster() {
        return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(","))
                     .filter(hostAndPort -> !hostAndPort.isEmpty())
                     .map(hostAndPort -> hostAndPort.split(":")[0]) // drop the ":port" suffix
                     .collect(Collectors.toUnmodifiableList());
    }

    // ---------------- Locks ----------------

    /** Locks a tenant; double the default timeout since tenant operations can be slow. */
    public Mutex lock(TenantName name) {
        return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2));
    }

    /** Locks an application; double the default timeout, as for tenants. */
    public Mutex lock(TenantAndApplicationId id) {
        return curator.lock(lockPath(id), defaultLockTimeout.multipliedBy(2));
    }

    /** Locks a deployment; uses the long deploy timeout since deployments can run for a long time. */
    public Mutex lockForDeployment(ApplicationId id, ZoneId zone) {
        return curator.lock(lockPath(id, zone), deployLockTimeout);
    }

    public Mutex lock(ApplicationId id, JobType type) {
        return curator.lock(lockPath(id, type), defaultLockTimeout);
    }

    /** Step-level job lock; fails fast (see {@link #tryLock}) rather than queueing. */
    public Mutex lock(ApplicationId id, JobType type, Step step) throws TimeoutException {
        return tryLock(lockPath(id, type, step));
    }

    public Mutex lockRotations() {
        return curator.lock(lockRoot.append("rotations"), defaultLockTimeout);
    }

    public Mutex lockConfidenceOverrides() {
        return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout);
    }

    /** Fail-fast lock for maintenance jobs; rethrows timeout unchecked so callers need not declare it. */
    public Mutex lockMaintenanceJob(String jobName) {
        try {
            return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName));
        }
        catch (TimeoutException e) {
            throw new UncheckedTimeoutException(e);
        }
    }

    public Mutex lockProvisionState(String provisionStateId) {
        return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1));
    }

    public Mutex lockOsVersions() {
        return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout);
    }

    public Mutex lockOsVersionStatus() {
        return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout);
    }

    public Mutex lockRoutingPolicies() {
        return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout);
    }

    public Mutex lockAuditLog() {
        return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout);
    }

    public Mutex lockNameServiceQueue() {
        return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout);
    }

    public Mutex lockMeteringRefreshTime() throws TimeoutException {
        return tryLock(lockRoot.append("meteringRefreshTime"));
    }

    public Mutex lockArchiveBuckets(ZoneId zoneId) {
        return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout);
    }

    public Mutex lockChangeRequests() {
        return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout);
    }

    public Mutex lockNotifications(TenantName tenantName) {
        return curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout);
    }

    public Mutex lockSupportAccess(DeploymentId deploymentId) {
        return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout);
    }

    public Mutex lockDeploymentRetriggerQueue() {
        return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout);
    }

    /** Try locking with a low timeout, meaning it is OK to fail lock acquisition.
     *
     * Useful for maintenance jobs, where there is no point in running the jobs back to back.
     */
    private Mutex tryLock(Path path) throws TimeoutException {
        try {
            return curator.lock(path, tryLockTimeout);
        }
        catch (UncheckedTimeoutException e) {
            throw new TimeoutException(e.getMessage());
        }
    }

    // ---------------- Generic read/write helpers ----------------

    /** Reads and maps the data at the given path; empty (zero-length) nodes count as absent. */
    private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
        return curator.getData(path).filter(data -> data.length > 0).map(mapper);
    }

    private Optional<Slime> readSlime(Path path) {
        return read(path, SlimeUtils::jsonToSlime);
    }

    private static byte[] asJson(Slime slime) {
        try {
            return SlimeUtils.toJsonBytes(slime);
        }
        catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    // ---------------- Upgrader state ----------------

    /** Returns the configured upgrade rate; defaults to 0.125 upgrades per minute when unset. */
    public double readUpgradesPerMinute() {
        return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125);
    }

    // Stored as raw big-endian double bytes, not JSON — must match the ByteBuffer read above.
    public void writeUpgradesPerMinute(double n) {
        curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
    }

    public void writeVersionStatus(VersionStatus status) {
        curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
    }

    public VersionStatus readVersionStatus() {
        return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
    }

    public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
        curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
    }

    public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
        return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
                                                  .orElseGet(Collections::emptyMap);
    }

    public void writeControllerVersion(HostName hostname, ControllerVersion version) {
        curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version)));
    }

    /** Returns the stored version for the given controller host, or CURRENT when none is stored. */
    public ControllerVersion readControllerVersion(HostName hostname) {
        return readSlime(controllerPath(hostname.value()))
                .map(controllerVersionSerializer::fromSlime)
                .orElse(ControllerVersion.CURRENT);
    }

    public void writeOsVersionTargets(Set<OsVersionTarget> versions) {
        curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions)));
    }

    public Set<OsVersionTarget> readOsVersionTargets() {
        return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet);
    }

    public void writeOsVersionStatus(OsVersionStatus status) {
        curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status)));
    }

    public OsVersionStatus readOsVersionStatus() {
        return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty);
    }

    // ---------------- Tenants ----------------

    public void writeTenant(Tenant tenant) {
        curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
    }

    public Optional<Tenant> readTenant(TenantName name) {
        return readSlime(tenantPath(name)).map(tenantSerializer::tenantFrom);
    }

    /** Reads all tenants; tenants whose node has disappeared between listing and reading are skipped. */
    public List<Tenant> readTenants() {
        return readTenantNames().stream()
                                .map(this::readTenant)
                                .flatMap(Optional::stream)
                                .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
    }

    public List<TenantName> readTenantNames() {
        return curator.getChildren(tenantRoot).stream()
                      .map(TenantName::from)
                      .collect(Collectors.toList());
    }

    public void removeTenant(TenantName name) {
        curator.delete(tenantPath(name));
    }

    // ---------------- Applications ----------------

    public void writeApplication(Application application) {
        curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
    }

    /**
     * Reads an application, going through the version-stamped cache: the cached copy is
     * returned only when its stored stat version matches the node's current version;
     * otherwise the node is re-read and the cache entry replaced.
     */
    public Optional<Application> readApplication(TenantAndApplicationId application) {
        Path path = applicationPath(application);
        return curator.getStat(path)
                      .map(stat -> cachedApplications.compute(path, (__, old) ->
                              old != null && old.getFirst() == stat.getVersion()
                                      ? old
                                      : new Pair<>(stat.getVersion(), read(path, bytes -> applicationSerializer.fromSlime(bytes)).get())).getSecond());
    }

    public List<Application> readApplications(boolean canFail) {
        return readApplications(ignored -> true, canFail);
    }

    public List<Application> readApplications(TenantName name) {
        return readApplications(application -> application.tenant().equals(name), false);
    }

    /**
     * Reads applications matching the filter. When {@code canFail} is true, a failure to
     * deserialize one application is logged and skipped instead of aborting the whole read.
     */
    private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) {
        var applicationIds = readApplicationIds();
        var applications = new ArrayList<Application>(applicationIds.size());
        for (var id : applicationIds) {
            if (!applicationFilter.test(id)) continue;
            try {
                readApplication(id).ifPresent(applications::add);
            }
            catch (Exception e) {
                if (canFail) {
                    log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " +
                                          "manual intervention", e);
                }
                else {
                    throw e;
                }
            }
        }
        return Collections.unmodifiableList(applications);
    }

    public List<TenantAndApplicationId> readApplicationIds() {
        return curator.getChildren(applicationRoot).stream()
                      .map(TenantAndApplicationId::fromSerialized)
                      .sorted()
                      .collect(toUnmodifiableList());
    }

    public void removeApplication(TenantAndApplicationId id) {
        curator.delete(applicationPath(id));
    }

    // ---------------- Job runs and logs ----------------

    public void writeLastRun(Run run) {
        curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run)));
    }

    public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) {
        Path path = runsPath(id, type);
        curator.set(path, asJson(runSerializer.toSlime(runs)));
    }

    public Optional<Run> readLastRun(ApplicationId id, JobType type) {
        return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime);
    }

    /** Reads historic runs through the same version-stamped caching scheme as {@link #readApplication}. */
    public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) {
        Path path = runsPath(id, type);
        return curator.getStat(path)
                      .map(stat -> cachedHistoricRuns.compute(path, (__, old) ->
                              old != null && old.getFirst() == stat.getVersion()
                                      ? old
                                      : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond())
                      .orElseGet(Collections::emptyNavigableMap);
    }

    public void deleteRunData(ApplicationId id, JobType type) {
        curator.delete(runsPath(id, type));
        curator.delete(lastRunPath(id, type));
    }

    public void deleteRunData(ApplicationId id) {
        curator.delete(jobRoot.append(id.serializedForm()));
    }

    public List<ApplicationId> applicationsWithJobs() {
        return curator.getChildren(jobRoot).stream()
                      .map(ApplicationId::fromSerializedForm)
                      .collect(Collectors.toList());
    }

    public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) {
        return curator.getData(logPath(id, type, chunkId));
    }

    public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) {
        curator.set(logPath(id, type, chunkId), log);
    }

    public void deleteLog(ApplicationId id, JobType type) {
        curator.delete(runsPath(id, type).append("logs"));
    }

    // NOTE(review): lastLogPath() is the ".../logs" node itself, i.e. the parent of the
    // numbered log chunks written by writeLog() — the id is stored in the parent node's data.
    public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) {
        return curator.getData(lastLogPath(id, type))
                      .map(String::new).map(Long::parseLong);
    }

    public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) {
        curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes());
    }

    /** Returns the ids of all stored log chunks for the given job, in ascending order. */
    public LongStream getLogChunkIds(ApplicationId id, JobType type) {
        return curator.getChildren(runsPath(id, type).append("logs")).stream()
                      .mapToLong(Long::parseLong)
                      .sorted();
    }

    // ---------------- Audit log ----------------

    public AuditLog readAuditLog() {
        return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime)
                                        .orElse(AuditLog.empty);
    }

    public void writeAuditLog(AuditLog log) {
        curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log)));
    }

    // ---------------- Name service queue ----------------

    public NameServiceQueue readNameServiceQueue() {
        return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime)
                                                .orElse(NameServiceQueue.EMPTY);
    }

    public void writeNameServiceQueue(NameServiceQueue queue) {
        curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue)));
    }

    // ---------------- Provisioning (opaque byte blobs) ----------------

    @SuppressWarnings("unused")
    public Optional<byte[]> readProvisionState(String provisionId) {
        return curator.getData(provisionStatePath(provisionId));
    }

    @SuppressWarnings("unused")
    public void writeProvisionState(String provisionId, byte[] data) {
        curator.set(provisionStatePath(provisionId), data);
    }

    @SuppressWarnings("unused")
    public List<String> readProvisionStateIds() {
        return curator.getChildren(provisionStatePath());
    }

    // ---------------- Routing policies ----------------

    /**
     * Writes the routing policies of an application.
     *
     * @throws IllegalArgumentException if any policy is owned by a different application
     */
    public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) {
        for (var policy : policies) {
            if (!policy.id().owner().equals(application)) {
                throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " +
                                                   application.toShortString());
            }
        }
        curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies)));
    }

    public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() {
        return readRoutingPolicies((instance) -> true);
    }

    public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) {
        return curator.getChildren(routingPoliciesRoot).stream()
                      .map(ApplicationId::fromSerializedForm)
                      .filter(filter)
                      .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies));
    }

    public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) {
        return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime))
                                                        .orElseGet(List::of);
    }

    public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) {
        curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy)));
    }

    /** Returns the routing policy of a zone, defaulting to routing-status DEFAULT when none is stored. */
    public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) {
        return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data))
                                                     .orElseGet(() -> new ZoneRoutingPolicy(zone, RoutingStatus.DEFAULT));
    }

    // ---------------- Endpoint certificates ----------------

    public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) {
        curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata)));
    }

    public void deleteEndpointCertificateMetadata(ApplicationId applicationId) {
        curator.delete(endpointCertificatePath(applicationId));
    }

    public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) {
        return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString);
    }

    /**
     * Reads certificate metadata for every application that has a certificate node.
     * orElseThrow: a child listed under the root is expected to always have data; an
     * empty value here indicates an inconsistent store and fails loudly.
     */
    public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() {
        Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>();
        for (String appIdString : curator.getChildren(endpointCertificateRoot)) {
            ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString);
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId);
            allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow());
        }
        return allEndpointCertificateMetadata;
    }

    // ---------------- Metering ----------------

    public void writeMeteringRefreshTime(long timestamp) {
        curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes());
    }

    /** Returns the last metering refresh timestamp, or 0 when never written. */
    public long readMeteringRefreshTime() {
        return curator.getData(meteringRefreshPath())
                      .map(String::new).map(Long::parseLong)
                      .orElse(0L);
    }

    // ---------------- Archive buckets ----------------

    public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) {
        return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString)
                      .orElseGet(Set::of);
    }

    public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) {
        curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets)));
    }

    // ---------------- Change requests ----------------

    public Optional<VespaChangeRequest> readChangeRequest(String changeRequestId) {
        return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime);
    }

    public List<VespaChangeRequest> readChangeRequests() {
        return curator.getChildren(changeRequestsRoot)
                      .stream()
                      .map(this::readChangeRequest)
                      .flatMap(Optional::stream)
                      .collect(Collectors.toList());
    }

    public void writeChangeRequest(VespaChangeRequest changeRequest) {
        curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest)));
    }

    public void deleteChangeRequest(VespaChangeRequest changeRequest) {
        curator.delete(changeRequestPath(changeRequest.getId()));
    }

    // ---------------- Notifications ----------------

    public List<Notification> readNotifications(TenantName tenantName) {
        return readSlime(notificationsPath(tenantName))
                .map(slime -> notificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of);
    }

    public List<TenantName> listTenantsWithNotifications() {
        return curator.getChildren(notificationsRoot).stream()
                      .map(TenantName::from)
                      .collect(Collectors.toUnmodifiableList());
    }

    public void writeNotifications(TenantName tenantName, List<Notification> notifications) {
        curator.set(notificationsPath(tenantName), asJson(notificationsSerializer.toSlime(notifications)));
    }

    public void deleteNotifications(TenantName tenantName) {
        curator.delete(notificationsPath(tenantName));
    }

    // ---------------- Support access ----------------

    public SupportAccess readSupportAccess(DeploymentId deploymentId) {
        return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY);
    }

    /** Take lock before reading before writing */
    public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) {
        curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess)));
    }

    // ---------------- Deployment retrigger queue ----------------

    public List<RetriggerEntry> readRetriggerEntries() {
        return readSlime(deploymentRetriggerPath()).map(retriggerEntrySerializer::fromSlime).orElseGet(List::of);
    }

    public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) {
        curator.set(deploymentRetriggerPath(), asJson(retriggerEntrySerializer.toSlime(retriggerEntries)));
    }

    // ---------------- Lock paths ----------------

    private Path lockPath(TenantName tenant) {
        return lockRoot
                .append(tenant.value());
    }

    private Path lockPath(TenantAndApplicationId application) {
        return lockRoot.append(application.tenant().value() + ":" + application.application().value());
    }

    private Path lockPath(ApplicationId instance, ZoneId zone) {
        return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value());
    }

    private Path lockPath(ApplicationId instance, JobType type) {
        return lockRoot.append(instance.serializedForm() + ":" + type.jobName());
    }

    private Path lockPath(ApplicationId instance, JobType type, Step step) {
        return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name());
    }

    // Nests the full provision-state path under the lock root, then the id.
    private Path lockPath(String provisionId) {
        return lockRoot
                .append(provisionStatePath())
                .append(provisionId);
    }

    // ---------------- Data paths ----------------

    private static Path upgradesPerMinutePath() {
        return root.append("upgrader").append("upgradesPerMinute");
    }

    private static Path confidenceOverridesPath() {
        return root.append("upgrader").append("confidenceOverrides");
    }

    private static Path osVersionTargetsPath() {
        return root.append("osUpgrader").append("targetVersion");
    }

    private static Path osVersionStatusPath() {
        return root.append("osVersionStatus");
    }

    private static Path versionStatusPath() {
        return root.append("versionStatus");
    }

    private static Path routingPolicyPath(ApplicationId application) {
        return routingPoliciesRoot.append(application.serializedForm());
    }

    private static Path zoneRoutingPolicyPath(ZoneId zone) {
        return zoneRoutingPoliciesRoot.append(zone.value());
    }

    private static Path nameServiceQueuePath() {
        return root.append("nameServiceQueue");
    }

    private static Path auditLogPath() {
        return root.append("auditLog");
    }

    private static Path provisionStatePath() {
        return root.append("provisioning").append("states");
    }

    private static Path provisionStatePath(String provisionId) {
        return provisionStatePath().append(provisionId);
    }

    private static Path tenantPath(TenantName name) {
        return tenantRoot.append(name.value());
    }

    private static Path applicationPath(TenantAndApplicationId id) {
        return applicationRoot.append(id.serialized());
    }

    private static Path runsPath(ApplicationId id, JobType type) {
        return jobRoot.append(id.serializedForm()).append(type.jobName());
    }

    private static Path lastRunPath(ApplicationId id, JobType type) {
        return runsPath(id, type).append("last");
    }

    private static Path logPath(ApplicationId id, JobType type, long first) {
        return runsPath(id, type).append("logs").append(Long.toString(first));
    }

    private static Path lastLogPath(ApplicationId id, JobType type) {
        return runsPath(id, type).append("logs");
    }

    private static Path controllerPath(String hostname) {
        return controllerRoot.append(hostname);
    }

    private static Path endpointCertificatePath(ApplicationId id) {
        return endpointCertificateRoot.append(id.serializedForm());
    }

    private static Path meteringRefreshPath() {
        return root.append("meteringRefreshTime");
    }

    private static Path archiveBucketsPath(ZoneId zoneId) {
        return archiveBucketsRoot.append(zoneId.value());
    }

    private static Path changeRequestPath(String id) {
        return changeRequestsRoot.append(id);
    }

    private static Path notificationsPath(TenantName tenantName) {
        return notificationsRoot.append(tenantName.value());
    }

    private static Path supportAccessPath(DeploymentId deploymentId) {
        return supportAccessRoot.append(deploymentId.dottedString());
    }

    private static Path deploymentRetriggerPath() {
        return root.append("deploymentRetriggerQueue");
    }

}
```suggestion curator.delete(root.append("upgrader").append("targetMajorVersion")); ```
/** Removes the pinned target major version for the upgrader, if one is set. Fixes the "ugprader" path typo so the delete hits the same znode the rest of the class writes under root.append("upgrader"). */
public void clearTargetMajor() { curator.delete(root.append("upgrader").append("targetMajorVersion")); }
// Delete under "upgrader" (was misspelled "ugprader"), matching upgradesPerMinutePath() and confidenceOverridesPath().
curator.delete(root.append("upgrader").append("targetMajorVersion"));
/** Clears any pinned target major version stored under the upgrader node. */
public void clearTargetMajor() {
    var targetMajorPath = root.append("upgrader").append("targetMajorVersion");
    curator.delete(targetMajorPath);
}
class CuratorDb { private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Duration deployLockTimeout = Duration.ofMinutes(30); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1); private static final Path root = Path.fromString("/controller/v1"); private static final Path lockRoot = root.append("locks"); private static final Path tenantRoot = root.append("tenants"); private static final Path applicationRoot = root.append("applications"); private static final Path jobRoot = root.append("jobs"); private static final Path controllerRoot = root.append("controllers"); private static final Path routingPoliciesRoot = root.append("routingPolicies"); private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies"); private static final Path endpointCertificateRoot = root.append("applicationCertificates"); private static final Path archiveBucketsRoot = root.append("archiveBuckets"); private static final Path changeRequestsRoot = root.append("changeRequests"); private static final Path notificationsRoot = root.append("notifications"); private static final Path supportAccessRoot = root.append("supportAccess"); private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer(); private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer); private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer(); private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer(); private final TenantSerializer tenantSerializer = new TenantSerializer(); private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer(); private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer); private final 
OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer); private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer(); private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer); private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer(); private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer(); private final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private final RunSerializer runSerializer = new RunSerializer(); private final RetriggerEntrySerializer retriggerEntrySerializer = new RetriggerEntrySerializer(); private final NotificationsSerializer notificationsSerializer = new NotificationsSerializer(); private final Curator curator; private final Duration tryLockTimeout; private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>(); private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>(); @Inject public CuratorDb(Curator curator, ServiceRegistry services) { this(curator, defaultTryLockTimeout, services.zoneRegistry().system()); } CuratorDb(Curator curator, Duration tryLockTimeout, SystemName system) { this.curator = curator; this.tryLockTimeout = tryLockTimeout; } /** Returns all hostnames configured to be part of this ZooKeeper cluster */ public List<String> cluster() { return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(",")) .filter(hostAndPort -> !hostAndPort.isEmpty()) .map(hostAndPort -> hostAndPort.split(":")[0]) .collect(Collectors.toUnmodifiableList()); } public Mutex lock(TenantName name) { return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2)); } public Mutex lock(TenantAndApplicationId id) { return curator.lock(lockPath(id), 
defaultLockTimeout.multipliedBy(2)); } public Mutex lockForDeployment(ApplicationId id, ZoneId zone) { return curator.lock(lockPath(id, zone), deployLockTimeout); } public Mutex lock(ApplicationId id, JobType type) { return curator.lock(lockPath(id, type), defaultLockTimeout); } public Mutex lock(ApplicationId id, JobType type, Step step) throws TimeoutException { return tryLock(lockPath(id, type, step)); } public Mutex lockRotations() { return curator.lock(lockRoot.append("rotations"), defaultLockTimeout); } public Mutex lockConfidenceOverrides() { return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout); } public Mutex lockMaintenanceJob(String jobName) { try { return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName)); } catch (TimeoutException e) { throw new UncheckedTimeoutException(e); } } public Mutex lockProvisionState(String provisionStateId) { return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Mutex lockOsVersions() { return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout); } public Mutex lockOsVersionStatus() { return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout); } public Mutex lockRoutingPolicies() { return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout); } public Mutex lockAuditLog() { return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout); } public Mutex lockNameServiceQueue() { return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout); } public Mutex lockMeteringRefreshTime() throws TimeoutException { return tryLock(lockRoot.append("meteringRefreshTime")); } public Mutex lockArchiveBuckets(ZoneId zoneId) { return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout); } public Mutex lockChangeRequests() { return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout); } public Mutex lockNotifications(TenantName tenantName) { return 
curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout); } public Mutex lockSupportAccess(DeploymentId deploymentId) { return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout); } public Mutex lockDeploymentRetriggerQueue() { return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout); } /** Try locking with a low timeout, meaning it is OK to fail lock acquisition. * * Useful for maintenance jobs, where there is no point in running the jobs back to back. */ private Mutex tryLock(Path path) throws TimeoutException { try { return curator.lock(path, tryLockTimeout); } catch (UncheckedTimeoutException e) { throw new TimeoutException(e.getMessage()); } } private <T> Optional<T> read(Path path, Function<byte[], T> mapper) { return curator.getData(path).filter(data -> data.length > 0).map(mapper); } private Optional<Slime> readSlime(Path path) { return read(path, SlimeUtils::jsonToSlime); } private static byte[] asJson(Slime slime) { try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } public double readUpgradesPerMinute() { return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125); } public void writeUpgradesPerMinute(double n) { curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); } public void writeVersionStatus(VersionStatus status) { curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status))); } public VersionStatus readVersionStatus() { return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty); } public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) { curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides))); } public Map<Version, VespaVersion.Confidence> 
readConfidenceOverrides() { return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime) .orElseGet(Collections::emptyMap); } public void writeControllerVersion(HostName hostname, ControllerVersion version) { curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version))); } public ControllerVersion readControllerVersion(HostName hostname) { return readSlime(controllerPath(hostname.value())) .map(controllerVersionSerializer::fromSlime) .orElse(ControllerVersion.CURRENT); } public void writeOsVersionTargets(Set<OsVersionTarget> versions) { curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions))); } public Set<OsVersionTarget> readOsVersionTargets() { return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet); } public void writeOsVersionStatus(OsVersionStatus status) { curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status))); } public OsVersionStatus readOsVersionStatus() { return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty); } public void writeTenant(Tenant tenant) { curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant))); } public Optional<Tenant> readTenant(TenantName name) { return readSlime(tenantPath(name)).map(tenantSerializer::tenantFrom); } public List<Tenant> readTenants() { return readTenantNames().stream() .map(this::readTenant) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } public List<TenantName> readTenantNames() { return curator.getChildren(tenantRoot).stream() .map(TenantName::from) .collect(Collectors.toList()); } public void removeTenant(TenantName name) { curator.delete(tenantPath(name)); } public void writeApplication(Application application) { curator.set(applicationPath(application.id()), 
asJson(applicationSerializer.toSlime(application))); } public Optional<Application> readApplication(TenantAndApplicationId application) { Path path = applicationPath(application); return curator.getStat(path) .map(stat -> cachedApplications.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), read(path, bytes -> applicationSerializer.fromSlime(bytes)).get())).getSecond()); } public List<Application> readApplications(boolean canFail) { return readApplications(ignored -> true, canFail); } public List<Application> readApplications(TenantName name) { return readApplications(application -> application.tenant().equals(name), false); } private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) { var applicationIds = readApplicationIds(); var applications = new ArrayList<Application>(applicationIds.size()); for (var id : applicationIds) { if (!applicationFilter.test(id)) continue; try { readApplication(id).ifPresent(applications::add); } catch (Exception e) { if (canFail) { log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " + "manual intervention", e); } else { throw e; } } } return Collections.unmodifiableList(applications); } public List<TenantAndApplicationId> readApplicationIds() { return curator.getChildren(applicationRoot).stream() .map(TenantAndApplicationId::fromSerialized) .sorted() .collect(toUnmodifiableList()); } public void removeApplication(TenantAndApplicationId id) { curator.delete(applicationPath(id)); } public void writeLastRun(Run run) { curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run))); } public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) { Path path = runsPath(id, type); curator.set(path, asJson(runSerializer.toSlime(runs))); } public Optional<Run> readLastRun(ApplicationId id, JobType type) { return 
readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime); } public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) { Path path = runsPath(id, type); return curator.getStat(path) .map(stat -> cachedHistoricRuns.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond()) .orElseGet(Collections::emptyNavigableMap); } public void deleteRunData(ApplicationId id, JobType type) { curator.delete(runsPath(id, type)); curator.delete(lastRunPath(id, type)); } public void deleteRunData(ApplicationId id) { curator.delete(jobRoot.append(id.serializedForm())); } public List<ApplicationId> applicationsWithJobs() { return curator.getChildren(jobRoot).stream() .map(ApplicationId::fromSerializedForm) .collect(Collectors.toList()); } public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) { return curator.getData(logPath(id, type, chunkId)); } public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) { curator.set(logPath(id, type, chunkId), log); } public void deleteLog(ApplicationId id, JobType type) { curator.delete(runsPath(id, type).append("logs")); } public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) { return curator.getData(lastLogPath(id, type)) .map(String::new).map(Long::parseLong); } public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) { curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes()); } public LongStream getLogChunkIds(ApplicationId id, JobType type) { return curator.getChildren(runsPath(id, type).append("logs")).stream() .mapToLong(Long::parseLong) .sorted(); } public AuditLog readAuditLog() { return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime) .orElse(AuditLog.empty); } public void writeAuditLog(AuditLog log) { curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log))); } public 
NameServiceQueue readNameServiceQueue() { return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime) .orElse(NameServiceQueue.EMPTY); } public void writeNameServiceQueue(NameServiceQueue queue) { curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue))); } @SuppressWarnings("unused") public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } @SuppressWarnings("unused") public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } @SuppressWarnings("unused") public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) { for (var policy : policies) { if (!policy.id().owner().equals(application)) { throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " + application.toShortString()); } } curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies))); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() { return readRoutingPolicies((instance) -> true); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) { return curator.getChildren(routingPoliciesRoot).stream() .map(ApplicationId::fromSerializedForm) .filter(filter) .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies)); } public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) { return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime)) .orElseGet(List::of); } public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) { curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy))); } public ZoneRoutingPolicy 
readZoneRoutingPolicy(ZoneId zone) { return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data)) .orElseGet(() -> new ZoneRoutingPolicy(zone, RoutingStatus.DEFAULT)); } public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) { curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata))); } public void deleteEndpointCertificateMetadata(ApplicationId applicationId) { curator.delete(endpointCertificatePath(applicationId)); } public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) { return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString); } public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() { Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>(); for (String appIdString : curator.getChildren(endpointCertificateRoot)) { ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString); Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId); allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow()); } return allEndpointCertificateMetadata; } public void writeMeteringRefreshTime(long timestamp) { curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes()); } public long readMeteringRefreshTime() { return curator.getData(meteringRefreshPath()) .map(String::new).map(Long::parseLong) .orElse(0L); } public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) { return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString) .orElseGet(Set::of); } public void writeArchiveBuckets(ZoneId zoneid, 
Set<ArchiveBucket> archiveBuckets) { curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets))); } public Optional<VespaChangeRequest> readChangeRequest(String changeRequestId) { return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime); } public List<VespaChangeRequest> readChangeRequests() { return curator.getChildren(changeRequestsRoot) .stream() .map(this::readChangeRequest) .flatMap(Optional::stream) .collect(Collectors.toList()); } public void writeChangeRequest(VespaChangeRequest changeRequest) { curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest))); } public void deleteChangeRequest(VespaChangeRequest changeRequest) { curator.delete(changeRequestPath(changeRequest.getId())); } public List<Notification> readNotifications(TenantName tenantName) { return readSlime(notificationsPath(tenantName)) .map(slime -> notificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of); } public List<TenantName> listTenantsWithNotifications() { return curator.getChildren(notificationsRoot).stream() .map(TenantName::from) .collect(Collectors.toUnmodifiableList()); } public void writeNotifications(TenantName tenantName, List<Notification> notifications) { curator.set(notificationsPath(tenantName), asJson(notificationsSerializer.toSlime(notifications))); } public void deleteNotifications(TenantName tenantName) { curator.delete(notificationsPath(tenantName)); } public SupportAccess readSupportAccess(DeploymentId deploymentId) { return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY); } /** Take lock before reading before writing */ public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) { curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess))); } public List<RetriggerEntry> 
readRetriggerEntries() { return readSlime(deploymentRetriggerPath()).map(retriggerEntrySerializer::fromSlime).orElseGet(List::of); } public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) { curator.set(deploymentRetriggerPath(), asJson(retriggerEntrySerializer.toSlime(retriggerEntries))); } private Path lockPath(TenantName tenant) { return lockRoot .append(tenant.value()); } private Path lockPath(TenantAndApplicationId application) { return lockRoot.append(application.tenant().value() + ":" + application.application().value()); } private Path lockPath(ApplicationId instance, ZoneId zone) { return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value()); } private Path lockPath(ApplicationId instance, JobType type) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName()); } private Path lockPath(ApplicationId instance, JobType type, Step step) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name()); } private Path lockPath(String provisionId) { return lockRoot .append(provisionStatePath()) .append(provisionId); } private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); } private static Path osVersionTargetsPath() { return root.append("osUpgrader").append("targetVersion"); } private static Path osVersionStatusPath() { return root.append("osVersionStatus"); } private static Path versionStatusPath() { return root.append("versionStatus"); } private static Path routingPolicyPath(ApplicationId application) { return routingPoliciesRoot.append(application.serializedForm()); } private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); } private static Path nameServiceQueuePath() { return root.append("nameServiceQueue"); } private static Path 
auditLogPath() { return root.append("auditLog"); } private static Path provisionStatePath() { return root.append("provisioning").append("states"); } private static Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); } private static Path applicationPath(TenantAndApplicationId id) { return applicationRoot.append(id.serialized()); } private static Path runsPath(ApplicationId id, JobType type) { return jobRoot.append(id.serializedForm()).append(type.jobName()); } private static Path lastRunPath(ApplicationId id, JobType type) { return runsPath(id, type).append("last"); } private static Path logPath(ApplicationId id, JobType type, long first) { return runsPath(id, type).append("logs").append(Long.toString(first)); } private static Path lastLogPath(ApplicationId id, JobType type) { return runsPath(id, type).append("logs"); } private static Path controllerPath(String hostname) { return controllerRoot.append(hostname); } private static Path endpointCertificatePath(ApplicationId id) { return endpointCertificateRoot.append(id.serializedForm()); } private static Path meteringRefreshPath() { return root.append("meteringRefreshTime"); } private static Path archiveBucketsPath(ZoneId zoneId) { return archiveBucketsRoot.append(zoneId.value()); } private static Path changeRequestPath(String id) { return changeRequestsRoot.append(id); } private static Path notificationsPath(TenantName tenantName) { return notificationsRoot.append(tenantName.value()); } private static Path supportAccessPath(DeploymentId deploymentId) { return supportAccessRoot.append(deploymentId.dottedString()); } private static Path deploymentRetriggerPath() { return root.append("deploymentRetriggerQueue"); } }
/**
 * Curator-backed database for the controller: persists controller state (tenants,
 * applications, deployment job runs and logs, routing policies, endpoint certificate
 * metadata, OS/controller version status, notifications, audit log, change requests,
 * support access, and the deployment retrigger queue) under the /controller/v1 path
 * of the ZooKeeper ensemble behind {@link Curator} — mostly serialized as JSON —
 * and hands out the distributed locks used to serialize mutations of that state.
 *
 * Applications and historic runs are additionally cached in memory, keyed by the
 * ZooKeeper node's stat version, so unchanged nodes are not re-deserialized.
 */
class CuratorDb {

    private static final Logger log = Logger.getLogger(CuratorDb.class.getName());

    // Deployments may legitimately hold their lock far longer than other operations.
    private static final Duration deployLockTimeout = Duration.ofMinutes(30);
    private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
    // Used by tryLock(): fail fast rather than queue up behind another lock holder.
    private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1);

    // Well-known paths under which each kind of entity is stored.
    private static final Path root = Path.fromString("/controller/v1");
    private static final Path lockRoot = root.append("locks");
    private static final Path tenantRoot = root.append("tenants");
    private static final Path applicationRoot = root.append("applications");
    private static final Path jobRoot = root.append("jobs");
    private static final Path controllerRoot = root.append("controllers");
    private static final Path routingPoliciesRoot = root.append("routingPolicies");
    private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies");
    private static final Path endpointCertificateRoot = root.append("applicationCertificates");
    private static final Path archiveBucketsRoot = root.append("archiveBuckets");
    private static final Path changeRequestsRoot = root.append("changeRequests");
    private static final Path notificationsRoot = root.append("notifications");
    private static final Path supportAccessRoot = root.append("supportAccess");

    // (De)serializers between domain objects and their Slime/JSON wire form.
    private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer();
    private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer);
    private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer();
    private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
    private final TenantSerializer tenantSerializer = new TenantSerializer();
    private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer();
    private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer);
    private final OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer);
    private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer();
    private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer);
    private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer();
    private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer();
    private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
    private final RunSerializer runSerializer = new RunSerializer();
    private final RetriggerEntrySerializer retriggerEntrySerializer = new RetriggerEntrySerializer();
    private final NotificationsSerializer notificationsSerializer = new NotificationsSerializer();

    private final Curator curator;
    private final Duration tryLockTimeout;

    // Caches keyed by node path; the Integer is the ZK stat version the cached value was read at.
    private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>();
    private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>();

    @Inject
    public CuratorDb(Curator curator, ServiceRegistry services) {
        this(curator, defaultTryLockTimeout, services.zoneRegistry().system());
    }

    // NOTE(review): the 'system' argument is accepted but neither stored nor used here.
    CuratorDb(Curator curator, Duration tryLockTimeout, SystemName system) {
        this.curator = curator;
        this.tryLockTimeout = tryLockTimeout;
    }

    /** Returns all hostnames configured to be part of this ZooKeeper cluster */
    public List<String> cluster() {
        return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(","))
                     .filter(hostAndPort -> !hostAndPort.isEmpty())
                     .map(hostAndPort -> hostAndPort.split(":")[0])
                     .collect(Collectors.toUnmodifiableList());
    }

    // -------------- Locks ---------------------------------------------------

    public Mutex lock(TenantName name) {
        return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2));
    }

    public Mutex lock(TenantAndApplicationId id) {
        return curator.lock(lockPath(id), defaultLockTimeout.multipliedBy(2));
    }

    public Mutex lockForDeployment(ApplicationId id, ZoneId zone) {
        return curator.lock(lockPath(id, zone), deployLockTimeout);
    }

    public Mutex lock(ApplicationId id, JobType type) {
        return curator.lock(lockPath(id, type), defaultLockTimeout);
    }

    public Mutex lock(ApplicationId id, JobType type, Step step) throws TimeoutException {
        return tryLock(lockPath(id, type, step));
    }

    public Mutex lockRotations() {
        return curator.lock(lockRoot.append("rotations"), defaultLockTimeout);
    }

    public Mutex lockConfidenceOverrides() {
        return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout);
    }

    public Mutex lockMaintenanceJob(String jobName) {
        try {
            return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName));
        } catch (TimeoutException e) {
            throw new UncheckedTimeoutException(e);
        }
    }

    public Mutex lockProvisionState(String provisionStateId) {
        return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1));
    }

    public Mutex lockOsVersions() {
        return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout);
    }

    public Mutex lockOsVersionStatus() {
        return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout);
    }

    public Mutex lockRoutingPolicies() {
        return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout);
    }

    public Mutex lockAuditLog() {
        return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout);
    }

    public Mutex lockNameServiceQueue() {
        return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout);
    }

    public Mutex lockMeteringRefreshTime() throws TimeoutException {
        return tryLock(lockRoot.append("meteringRefreshTime"));
    }

    public Mutex lockArchiveBuckets(ZoneId zoneId) {
        return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout);
    }

    public Mutex lockChangeRequests() {
        return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout);
    }

    public Mutex lockNotifications(TenantName tenantName) {
        return curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout);
    }

    public Mutex lockSupportAccess(DeploymentId deploymentId) {
        return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout);
    }

    public Mutex lockDeploymentRetriggerQueue() {
        return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout);
    }

    /**
     * Try locking with a low timeout, meaning it is OK to fail lock acquisition.
     *
     * Useful for maintenance jobs, where there is no point in running the jobs back to back.
     */
    private Mutex tryLock(Path path) throws TimeoutException {
        try {
            return curator.lock(path, tryLockTimeout);
        } catch (UncheckedTimeoutException e) {
            throw new TimeoutException(e.getMessage());
        }
    }

    // -------------- Read/write helpers --------------------------------------

    /** Reads the node at path and maps its raw bytes; empty data is treated as absent. */
    private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
        return curator.getData(path).filter(data -> data.length > 0).map(mapper);
    }

    private Optional<Slime> readSlime(Path path) {
        return read(path, SlimeUtils::jsonToSlime);
    }

    private static byte[] asJson(Slime slime) {
        try {
            return SlimeUtils.toJsonBytes(slime);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    // -------------- Upgrader ------------------------------------------------

    /** Returns the configured upgrade rate, defaulting to 0.125 upgrades per minute. */
    public double readUpgradesPerMinute() {
        return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125);
    }

    // Stored as raw big-endian double bytes, not JSON.
    public void writeUpgradesPerMinute(double n) {
        curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
    }

    public void writeVersionStatus(VersionStatus status) {
        curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
    }

    public VersionStatus readVersionStatus() {
        return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
    }

    public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
        curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
    }

    public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
        return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
                                                   .orElseGet(Collections::emptyMap);
    }

    // -------------- Controller and OS versions ------------------------------

    public void writeControllerVersion(HostName hostname, ControllerVersion version) {
        curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version)));
    }

    /** Returns the stored version for the given controller host, or CURRENT when none is stored. */
    public ControllerVersion readControllerVersion(HostName hostname) {
        return readSlime(controllerPath(hostname.value()))
                .map(controllerVersionSerializer::fromSlime)
                .orElse(ControllerVersion.CURRENT);
    }

    public void writeOsVersionTargets(Set<OsVersionTarget> versions) {
        curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions)));
    }

    public Set<OsVersionTarget> readOsVersionTargets() {
        return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet);
    }

    public void writeOsVersionStatus(OsVersionStatus status) {
        curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status)));
    }

    public OsVersionStatus readOsVersionStatus() {
        return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty);
    }

    // -------------- Tenants -------------------------------------------------

    public void writeTenant(Tenant tenant) {
        curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
    }

    public Optional<Tenant> readTenant(TenantName name) {
        return readSlime(tenantPath(name)).map(tenantSerializer::tenantFrom);
    }

    public List<Tenant> readTenants() {
        return readTenantNames().stream()
                                .map(this::readTenant)
                                .flatMap(Optional::stream)
                                .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
    }

    public List<TenantName> readTenantNames() {
        return curator.getChildren(tenantRoot).stream()
                      .map(TenantName::from)
                      .collect(Collectors.toList());
    }

    public void removeTenant(TenantName name) {
        curator.delete(tenantPath(name));
    }

    // -------------- Applications --------------------------------------------

    public void writeApplication(Application application) {
        curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
    }

    /**
     * Reads the given application, served from the in-memory cache whenever the node's
     * stat version matches the version the cached value was deserialized at.
     */
    public Optional<Application> readApplication(TenantAndApplicationId application) {
        Path path = applicationPath(application);
        return curator.getStat(path)
                      .map(stat -> cachedApplications.compute(path, (__, old) ->
                              old != null && old.getFirst() == stat.getVersion()
                              ? old
                              : new Pair<>(stat.getVersion(), read(path, bytes -> applicationSerializer.fromSlime(bytes)).get())).getSecond());
    }

    public List<Application> readApplications(boolean canFail) {
        return readApplications(ignored -> true, canFail);
    }

    public List<Application> readApplications(TenantName name) {
        return readApplications(application -> application.tenant().equals(name), false);
    }

    /**
     * Reads all applications matching the filter. When canFail is true, an application
     * that fails to read is logged and skipped instead of aborting the whole read.
     */
    private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) {
        var applicationIds = readApplicationIds();
        var applications = new ArrayList<Application>(applicationIds.size());
        for (var id : applicationIds) {
            if (!applicationFilter.test(id)) continue;
            try {
                readApplication(id).ifPresent(applications::add);
            } catch (Exception e) {
                if (canFail) {
                    log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " +
                                          "manual intervention", e);
                } else {
                    throw e;
                }
            }
        }
        return Collections.unmodifiableList(applications);
    }

    public List<TenantAndApplicationId> readApplicationIds() {
        return curator.getChildren(applicationRoot).stream()
                      .map(TenantAndApplicationId::fromSerialized)
                      .sorted()
                      .collect(toUnmodifiableList());
    }

    public void removeApplication(TenantAndApplicationId id) {
        curator.delete(applicationPath(id));
    }

    // -------------- Job runs ------------------------------------------------

    public void writeLastRun(Run run) {
        curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run)));
    }

    public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) {
        Path path = runsPath(id, type);
        curator.set(path, asJson(runSerializer.toSlime(runs)));
    }

    public Optional<Run> readLastRun(ApplicationId id, JobType type) {
        return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime);
    }

    /** Reads historic runs, cached by the node's stat version, like readApplication. */
    public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) {
        Path path = runsPath(id, type);
        return curator.getStat(path)
                      .map(stat -> cachedHistoricRuns.compute(path, (__, old) ->
                              old != null && old.getFirst() == stat.getVersion()
                              ? old
                              : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond())
                      .orElseGet(Collections::emptyNavigableMap);
    }

    /** Deletes both the historic runs and the last run for the given job. */
    public void deleteRunData(ApplicationId id, JobType type) {
        curator.delete(runsPath(id, type));
        curator.delete(lastRunPath(id, type));
    }

    /** Deletes all job data for the given instance. */
    public void deleteRunData(ApplicationId id) {
        curator.delete(jobRoot.append(id.serializedForm()));
    }

    public List<ApplicationId> applicationsWithJobs() {
        return curator.getChildren(jobRoot).stream()
                      .map(ApplicationId::fromSerializedForm)
                      .collect(Collectors.toList());
    }

    // -------------- Job logs ------------------------------------------------

    public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) {
        return curator.getData(logPath(id, type, chunkId));
    }

    public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) {
        curator.set(logPath(id, type, chunkId), log);
    }

    public void deleteLog(ApplicationId id, JobType type) {
        curator.delete(runsPath(id, type).append("logs"));
    }

    public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) {
        return curator.getData(lastLogPath(id, type))
                      .map(String::new).map(Long::parseLong);
    }

    // NOTE(review): getBytes() uses the platform default charset; digits are ASCII-safe,
    // but an explicit charset would be more robust — confirm before changing.
    public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) {
        curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes());
    }

    public LongStream getLogChunkIds(ApplicationId id, JobType type) {
        return curator.getChildren(runsPath(id, type).append("logs")).stream()
                      .mapToLong(Long::parseLong)
                      .sorted();
    }

    // -------------- Audit log and name service queue ------------------------

    public AuditLog readAuditLog() {
        return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime)
                                        .orElse(AuditLog.empty);
    }

    public void writeAuditLog(AuditLog log) {
        curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log)));
    }

    public NameServiceQueue readNameServiceQueue() {
        return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime)
                                                .orElse(NameServiceQueue.EMPTY);
    }

    public void writeNameServiceQueue(NameServiceQueue queue) {
        curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue)));
    }

    // -------------- Provisioning (opaque byte payloads) ---------------------

    @SuppressWarnings("unused")
    public Optional<byte[]> readProvisionState(String provisionId) {
        return curator.getData(provisionStatePath(provisionId));
    }

    @SuppressWarnings("unused")
    public void writeProvisionState(String provisionId, byte[] data) {
        curator.set(provisionStatePath(provisionId), data);
    }

    @SuppressWarnings("unused")
    public List<String> readProvisionStateIds() {
        return curator.getChildren(provisionStatePath());
    }

    // -------------- Routing policies ----------------------------------------

    /**
     * Writes the given policies for the given application.
     *
     * @throws IllegalArgumentException if any policy is owned by another application
     */
    public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) {
        for (var policy : policies) {
            if (!policy.id().owner().equals(application)) {
                throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " +
                                                   application.toShortString());
            }
        }
        curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies)));
    }

    public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() {
        return readRoutingPolicies((instance) -> true);
    }

    public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) {
        return curator.getChildren(routingPoliciesRoot).stream()
                      .map(ApplicationId::fromSerializedForm)
                      .filter(filter)
                      .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies));
    }

    public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) {
        return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime))
                                                        .orElseGet(List::of);
    }

    public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) {
        curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy)));
    }

    public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) {
        return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data))
                                                     .orElseGet(() -> new ZoneRoutingPolicy(zone, RoutingStatus.DEFAULT));
    }

    // -------------- Endpoint certificates -----------------------------------

    public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) {
        curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata)));
    }

    public void deleteEndpointCertificateMetadata(ApplicationId applicationId) {
        curator.delete(endpointCertificatePath(applicationId));
    }

    public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) {
        return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString);
    }

    /** Reads certificate metadata for all applications; throws if a listed child has no readable data. */
    public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() {
        Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>();
        for (String appIdString : curator.getChildren(endpointCertificateRoot)) {
            ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString);
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId);
            allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow());
        }
        return allEndpointCertificateMetadata;
    }

    // -------------- Metering ------------------------------------------------

    public void writeMeteringRefreshTime(long timestamp) {
        curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes());
    }

    public long readMeteringRefreshTime() {
        return curator.getData(meteringRefreshPath())
                      .map(String::new).map(Long::parseLong)
                      .orElse(0L);
    }

    // -------------- Archive buckets -----------------------------------------

    public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) {
        return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString)
                      .orElseGet(Set::of);
    }

    public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) {
        curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets)));
    }

    // -------------- Change requests -----------------------------------------

    public Optional<VespaChangeRequest> readChangeRequest(String changeRequestId) {
        return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime);
    }

    public List<VespaChangeRequest> readChangeRequests() {
        return curator.getChildren(changeRequestsRoot)
                      .stream()
                      .map(this::readChangeRequest)
                      .flatMap(Optional::stream)
                      .collect(Collectors.toList());
    }

    public void writeChangeRequest(VespaChangeRequest changeRequest) {
        curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest)));
    }

    public void deleteChangeRequest(VespaChangeRequest changeRequest) {
        curator.delete(changeRequestPath(changeRequest.getId()));
    }

    // -------------- Notifications -------------------------------------------

    public List<Notification> readNotifications(TenantName tenantName) {
        return readSlime(notificationsPath(tenantName))
                .map(slime -> notificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of);
    }

    public List<TenantName> listTenantsWithNotifications() {
        return curator.getChildren(notificationsRoot).stream()
                      .map(TenantName::from)
                      .collect(Collectors.toUnmodifiableList());
    }

    public void writeNotifications(TenantName tenantName, List<Notification> notifications) {
        curator.set(notificationsPath(tenantName), asJson(notificationsSerializer.toSlime(notifications)));
    }

    public void deleteNotifications(TenantName tenantName) {
        curator.delete(notificationsPath(tenantName));
    }

    // -------------- Support access ------------------------------------------

    public SupportAccess readSupportAccess(DeploymentId deploymentId) {
        return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY);
    }

    /** Callers must take the support-access lock and read the current state before writing. */
    public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) {
        curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess)));
    }

    // -------------- Deployment retrigger queue ------------------------------

    public List<RetriggerEntry> readRetriggerEntries() {
        return readSlime(deploymentRetriggerPath()).map(retriggerEntrySerializer::fromSlime).orElseGet(List::of);
    }

    public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) {
        curator.set(deploymentRetriggerPath(), asJson(retriggerEntrySerializer.toSlime(retriggerEntries)));
    }

    // -------------- Lock paths ----------------------------------------------

    private Path lockPath(TenantName tenant) {
        return lockRoot
                .append(tenant.value());
    }

    private Path lockPath(TenantAndApplicationId application) {
        return lockRoot.append(application.tenant().value() + ":" + application.application().value());
    }

    private Path lockPath(ApplicationId instance, ZoneId zone) {
        return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value());
    }

    private Path lockPath(ApplicationId instance, JobType type) {
        return lockRoot.append(instance.serializedForm() + ":" + type.jobName());
    }

    private Path lockPath(ApplicationId instance, JobType type, Step step) {
        return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name());
    }

    private Path lockPath(String provisionId) {
        return lockRoot
                .append(provisionStatePath())
                .append(provisionId);
    }

    // -------------- Data paths ----------------------------------------------

    private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); }

    private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); }

    private static Path osVersionTargetsPath() { return root.append("osUpgrader").append("targetVersion"); }

    private static Path osVersionStatusPath() { return root.append("osVersionStatus"); }

    private static Path versionStatusPath() { return root.append("versionStatus"); }

    private static Path routingPolicyPath(ApplicationId application) { return routingPoliciesRoot.append(application.serializedForm()); }

    private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); }

    private static Path nameServiceQueuePath() { return root.append("nameServiceQueue"); }

    private static Path auditLogPath() { return root.append("auditLog"); }

    private static Path provisionStatePath() { return root.append("provisioning").append("states"); }

    private static Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); }

    private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); }

    private static Path applicationPath(TenantAndApplicationId id) { return applicationRoot.append(id.serialized()); }

    private static Path runsPath(ApplicationId id, JobType type) { return jobRoot.append(id.serializedForm()).append(type.jobName()); }

    private static Path lastRunPath(ApplicationId id, JobType type) { return runsPath(id, type).append("last"); }

    private static Path logPath(ApplicationId id, JobType type, long first) { return runsPath(id, type).append("logs").append(Long.toString(first)); }

    private static Path lastLogPath(ApplicationId id, JobType type) { return runsPath(id, type).append("logs"); }

    private static Path controllerPath(String hostname) { return controllerRoot.append(hostname); }

    private static Path endpointCertificatePath(ApplicationId id) { return endpointCertificateRoot.append(id.serializedForm()); }

    private static Path meteringRefreshPath() { return root.append("meteringRefreshTime"); }

    private static Path archiveBucketsPath(ZoneId zoneId) { return archiveBucketsRoot.append(zoneId.value()); }

    private static Path changeRequestPath(String id) { return changeRequestsRoot.append(id); }

    private static Path notificationsPath(TenantName tenantName) { return notificationsRoot.append(tenantName.value()); }

    private static Path supportAccessPath(DeploymentId deploymentId) { return supportAccessRoot.append(deploymentId.dottedString()); }

    private static Path deploymentRetriggerPath() { return root.append("deploymentRetriggerQueue"); }

}
/**
 * Removes any pinned target major version for the upgrader.
 *
 * Fix: the path segment was misspelled "ugprader", which does not match the
 * "upgrader" subtree used by upgradesPerMinutePath() and confidenceOverridesPath(),
 * so the delete targeted a node nothing in this class ever writes.
 */
public void clearTargetMajor() { curator.delete(root.append("upgrader").append("targetMajorVersion")); }
// Fixed "ugprader" -> "upgrader" so this delete targets the same "upgrader" subtree
// used by upgradesPerMinutePath() and confidenceOverridesPath().
curator.delete(root.append("upgrader").append("targetMajorVersion"));
/** Removes any pinned target major version for the upgrader. */
public void clearTargetMajor() {
    Path targetMajorPath = root.append("upgrader").append("targetMajorVersion");
    curator.delete(targetMajorPath);
}
class CuratorDb { private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Duration deployLockTimeout = Duration.ofMinutes(30); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1); private static final Path root = Path.fromString("/controller/v1"); private static final Path lockRoot = root.append("locks"); private static final Path tenantRoot = root.append("tenants"); private static final Path applicationRoot = root.append("applications"); private static final Path jobRoot = root.append("jobs"); private static final Path controllerRoot = root.append("controllers"); private static final Path routingPoliciesRoot = root.append("routingPolicies"); private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies"); private static final Path endpointCertificateRoot = root.append("applicationCertificates"); private static final Path archiveBucketsRoot = root.append("archiveBuckets"); private static final Path changeRequestsRoot = root.append("changeRequests"); private static final Path notificationsRoot = root.append("notifications"); private static final Path supportAccessRoot = root.append("supportAccess"); private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer(); private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer); private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer(); private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer(); private final TenantSerializer tenantSerializer = new TenantSerializer(); private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer(); private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer); private final 
OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer); private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer(); private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer); private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer(); private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer(); private final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private final RunSerializer runSerializer = new RunSerializer(); private final RetriggerEntrySerializer retriggerEntrySerializer = new RetriggerEntrySerializer(); private final NotificationsSerializer notificationsSerializer = new NotificationsSerializer(); private final Curator curator; private final Duration tryLockTimeout; private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>(); private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>(); @Inject public CuratorDb(Curator curator, ServiceRegistry services) { this(curator, defaultTryLockTimeout, services.zoneRegistry().system()); } CuratorDb(Curator curator, Duration tryLockTimeout, SystemName system) { this.curator = curator; this.tryLockTimeout = tryLockTimeout; } /** Returns all hostnames configured to be part of this ZooKeeper cluster */ public List<String> cluster() { return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(",")) .filter(hostAndPort -> !hostAndPort.isEmpty()) .map(hostAndPort -> hostAndPort.split(":")[0]) .collect(Collectors.toUnmodifiableList()); } public Mutex lock(TenantName name) { return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2)); } public Mutex lock(TenantAndApplicationId id) { return curator.lock(lockPath(id), 
defaultLockTimeout.multipliedBy(2)); } public Mutex lockForDeployment(ApplicationId id, ZoneId zone) { return curator.lock(lockPath(id, zone), deployLockTimeout); } public Mutex lock(ApplicationId id, JobType type) { return curator.lock(lockPath(id, type), defaultLockTimeout); } public Mutex lock(ApplicationId id, JobType type, Step step) throws TimeoutException { return tryLock(lockPath(id, type, step)); } public Mutex lockRotations() { return curator.lock(lockRoot.append("rotations"), defaultLockTimeout); } public Mutex lockConfidenceOverrides() { return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout); } public Mutex lockMaintenanceJob(String jobName) { try { return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName)); } catch (TimeoutException e) { throw new UncheckedTimeoutException(e); } } public Mutex lockProvisionState(String provisionStateId) { return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Mutex lockOsVersions() { return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout); } public Mutex lockOsVersionStatus() { return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout); } public Mutex lockRoutingPolicies() { return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout); } public Mutex lockAuditLog() { return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout); } public Mutex lockNameServiceQueue() { return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout); } public Mutex lockMeteringRefreshTime() throws TimeoutException { return tryLock(lockRoot.append("meteringRefreshTime")); } public Mutex lockArchiveBuckets(ZoneId zoneId) { return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout); } public Mutex lockChangeRequests() { return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout); } public Mutex lockNotifications(TenantName tenantName) { return 
curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout); } public Mutex lockSupportAccess(DeploymentId deploymentId) { return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout); } public Mutex lockDeploymentRetriggerQueue() { return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout); } /** Try locking with a low timeout, meaning it is OK to fail lock acquisition. * * Useful for maintenance jobs, where there is no point in running the jobs back to back. */ private Mutex tryLock(Path path) throws TimeoutException { try { return curator.lock(path, tryLockTimeout); } catch (UncheckedTimeoutException e) { throw new TimeoutException(e.getMessage()); } } private <T> Optional<T> read(Path path, Function<byte[], T> mapper) { return curator.getData(path).filter(data -> data.length > 0).map(mapper); } private Optional<Slime> readSlime(Path path) { return read(path, SlimeUtils::jsonToSlime); } private static byte[] asJson(Slime slime) { try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } public double readUpgradesPerMinute() { return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125); } public void writeUpgradesPerMinute(double n) { curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); } public void writeVersionStatus(VersionStatus status) { curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status))); } public VersionStatus readVersionStatus() { return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty); } public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) { curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides))); } public Map<Version, VespaVersion.Confidence> 
readConfidenceOverrides() { return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime) .orElseGet(Collections::emptyMap); } public void writeControllerVersion(HostName hostname, ControllerVersion version) { curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version))); } public ControllerVersion readControllerVersion(HostName hostname) { return readSlime(controllerPath(hostname.value())) .map(controllerVersionSerializer::fromSlime) .orElse(ControllerVersion.CURRENT); } public void writeOsVersionTargets(Set<OsVersionTarget> versions) { curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions))); } public Set<OsVersionTarget> readOsVersionTargets() { return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet); } public void writeOsVersionStatus(OsVersionStatus status) { curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status))); } public OsVersionStatus readOsVersionStatus() { return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty); } public void writeTenant(Tenant tenant) { curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant))); } public Optional<Tenant> readTenant(TenantName name) { return readSlime(tenantPath(name)).map(tenantSerializer::tenantFrom); } public List<Tenant> readTenants() { return readTenantNames().stream() .map(this::readTenant) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } public List<TenantName> readTenantNames() { return curator.getChildren(tenantRoot).stream() .map(TenantName::from) .collect(Collectors.toList()); } public void removeTenant(TenantName name) { curator.delete(tenantPath(name)); } public void writeApplication(Application application) { curator.set(applicationPath(application.id()), 
asJson(applicationSerializer.toSlime(application))); } public Optional<Application> readApplication(TenantAndApplicationId application) { Path path = applicationPath(application); return curator.getStat(path) .map(stat -> cachedApplications.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), read(path, bytes -> applicationSerializer.fromSlime(bytes)).get())).getSecond()); } public List<Application> readApplications(boolean canFail) { return readApplications(ignored -> true, canFail); } public List<Application> readApplications(TenantName name) { return readApplications(application -> application.tenant().equals(name), false); } private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) { var applicationIds = readApplicationIds(); var applications = new ArrayList<Application>(applicationIds.size()); for (var id : applicationIds) { if (!applicationFilter.test(id)) continue; try { readApplication(id).ifPresent(applications::add); } catch (Exception e) { if (canFail) { log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " + "manual intervention", e); } else { throw e; } } } return Collections.unmodifiableList(applications); } public List<TenantAndApplicationId> readApplicationIds() { return curator.getChildren(applicationRoot).stream() .map(TenantAndApplicationId::fromSerialized) .sorted() .collect(toUnmodifiableList()); } public void removeApplication(TenantAndApplicationId id) { curator.delete(applicationPath(id)); } public void writeLastRun(Run run) { curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run))); } public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) { Path path = runsPath(id, type); curator.set(path, asJson(runSerializer.toSlime(runs))); } public Optional<Run> readLastRun(ApplicationId id, JobType type) { return 
readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime); } public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) { Path path = runsPath(id, type); return curator.getStat(path) .map(stat -> cachedHistoricRuns.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond()) .orElseGet(Collections::emptyNavigableMap); } public void deleteRunData(ApplicationId id, JobType type) { curator.delete(runsPath(id, type)); curator.delete(lastRunPath(id, type)); } public void deleteRunData(ApplicationId id) { curator.delete(jobRoot.append(id.serializedForm())); } public List<ApplicationId> applicationsWithJobs() { return curator.getChildren(jobRoot).stream() .map(ApplicationId::fromSerializedForm) .collect(Collectors.toList()); } public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) { return curator.getData(logPath(id, type, chunkId)); } public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) { curator.set(logPath(id, type, chunkId), log); } public void deleteLog(ApplicationId id, JobType type) { curator.delete(runsPath(id, type).append("logs")); } public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) { return curator.getData(lastLogPath(id, type)) .map(String::new).map(Long::parseLong); } public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) { curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes()); } public LongStream getLogChunkIds(ApplicationId id, JobType type) { return curator.getChildren(runsPath(id, type).append("logs")).stream() .mapToLong(Long::parseLong) .sorted(); } public AuditLog readAuditLog() { return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime) .orElse(AuditLog.empty); } public void writeAuditLog(AuditLog log) { curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log))); } public 
NameServiceQueue readNameServiceQueue() { return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime) .orElse(NameServiceQueue.EMPTY); } public void writeNameServiceQueue(NameServiceQueue queue) { curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue))); } @SuppressWarnings("unused") public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } @SuppressWarnings("unused") public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } @SuppressWarnings("unused") public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) { for (var policy : policies) { if (!policy.id().owner().equals(application)) { throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " + application.toShortString()); } } curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies))); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() { return readRoutingPolicies((instance) -> true); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) { return curator.getChildren(routingPoliciesRoot).stream() .map(ApplicationId::fromSerializedForm) .filter(filter) .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies)); } public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) { return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime)) .orElseGet(List::of); } public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) { curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy))); } public ZoneRoutingPolicy 
readZoneRoutingPolicy(ZoneId zone) { return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data)) .orElseGet(() -> new ZoneRoutingPolicy(zone, RoutingStatus.DEFAULT)); } public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) { curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata))); } public void deleteEndpointCertificateMetadata(ApplicationId applicationId) { curator.delete(endpointCertificatePath(applicationId)); } public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) { return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString); } public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() { Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>(); for (String appIdString : curator.getChildren(endpointCertificateRoot)) { ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString); Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId); allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow()); } return allEndpointCertificateMetadata; } public void writeMeteringRefreshTime(long timestamp) { curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes()); } public long readMeteringRefreshTime() { return curator.getData(meteringRefreshPath()) .map(String::new).map(Long::parseLong) .orElse(0L); } public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) { return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString) .orElseGet(Set::of); } public void writeArchiveBuckets(ZoneId zoneid, 
Set<ArchiveBucket> archiveBuckets) { curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets))); } public Optional<VespaChangeRequest> readChangeRequest(String changeRequestId) { return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime); } public List<VespaChangeRequest> readChangeRequests() { return curator.getChildren(changeRequestsRoot) .stream() .map(this::readChangeRequest) .flatMap(Optional::stream) .collect(Collectors.toList()); } public void writeChangeRequest(VespaChangeRequest changeRequest) { curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest))); } public void deleteChangeRequest(VespaChangeRequest changeRequest) { curator.delete(changeRequestPath(changeRequest.getId())); } public List<Notification> readNotifications(TenantName tenantName) { return readSlime(notificationsPath(tenantName)) .map(slime -> notificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of); } public List<TenantName> listTenantsWithNotifications() { return curator.getChildren(notificationsRoot).stream() .map(TenantName::from) .collect(Collectors.toUnmodifiableList()); } public void writeNotifications(TenantName tenantName, List<Notification> notifications) { curator.set(notificationsPath(tenantName), asJson(notificationsSerializer.toSlime(notifications))); } public void deleteNotifications(TenantName tenantName) { curator.delete(notificationsPath(tenantName)); } public SupportAccess readSupportAccess(DeploymentId deploymentId) { return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY); } /** Take lock before reading before writing */ public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) { curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess))); } public List<RetriggerEntry> 
readRetriggerEntries() { return readSlime(deploymentRetriggerPath()).map(retriggerEntrySerializer::fromSlime).orElseGet(List::of); } public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) { curator.set(deploymentRetriggerPath(), asJson(retriggerEntrySerializer.toSlime(retriggerEntries))); } private Path lockPath(TenantName tenant) { return lockRoot .append(tenant.value()); } private Path lockPath(TenantAndApplicationId application) { return lockRoot.append(application.tenant().value() + ":" + application.application().value()); } private Path lockPath(ApplicationId instance, ZoneId zone) { return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value()); } private Path lockPath(ApplicationId instance, JobType type) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName()); } private Path lockPath(ApplicationId instance, JobType type, Step step) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name()); } private Path lockPath(String provisionId) { return lockRoot .append(provisionStatePath()) .append(provisionId); } private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); } private static Path osVersionTargetsPath() { return root.append("osUpgrader").append("targetVersion"); } private static Path osVersionStatusPath() { return root.append("osVersionStatus"); } private static Path versionStatusPath() { return root.append("versionStatus"); } private static Path routingPolicyPath(ApplicationId application) { return routingPoliciesRoot.append(application.serializedForm()); } private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); } private static Path nameServiceQueuePath() { return root.append("nameServiceQueue"); } private static Path 
auditLogPath() { return root.append("auditLog"); } private static Path provisionStatePath() { return root.append("provisioning").append("states"); } private static Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); } private static Path applicationPath(TenantAndApplicationId id) { return applicationRoot.append(id.serialized()); } private static Path runsPath(ApplicationId id, JobType type) { return jobRoot.append(id.serializedForm()).append(type.jobName()); } private static Path lastRunPath(ApplicationId id, JobType type) { return runsPath(id, type).append("last"); } private static Path logPath(ApplicationId id, JobType type, long first) { return runsPath(id, type).append("logs").append(Long.toString(first)); } private static Path lastLogPath(ApplicationId id, JobType type) { return runsPath(id, type).append("logs"); } private static Path controllerPath(String hostname) { return controllerRoot.append(hostname); } private static Path endpointCertificatePath(ApplicationId id) { return endpointCertificateRoot.append(id.serializedForm()); } private static Path meteringRefreshPath() { return root.append("meteringRefreshTime"); } private static Path archiveBucketsPath(ZoneId zoneId) { return archiveBucketsRoot.append(zoneId.value()); } private static Path changeRequestPath(String id) { return changeRequestsRoot.append(id); } private static Path notificationsPath(TenantName tenantName) { return notificationsRoot.append(tenantName.value()); } private static Path supportAccessPath(DeploymentId deploymentId) { return supportAccessRoot.append(deploymentId.dottedString()); } private static Path deploymentRetriggerPath() { return root.append("deploymentRetriggerQueue"); } }
// Persistence layer for controller state, stored in ZooKeeper (via Curator) under /controller/v1.
// Each write* method serializes its value to JSON (Slime) and sets it on a well-known znode path;
// each read* method deserializes with the matching *Serializer and returns a documented empty/default
// value when the node is missing or empty. The class also hands out distributed locks (Mutex) scoped
// under /controller/v1/locks for the various controller subsystems.
class CuratorDb {

    private static final Logger log = Logger.getLogger(CuratorDb.class.getName());

    // Lock timeouts: deployments may legitimately hold their lock far longer than other operations,
    // while "try" locks give up almost immediately (see tryLock below).
    private static final Duration deployLockTimeout = Duration.ofMinutes(30);
    private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
    private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1);

    // Root znode paths for each category of persisted state.
    private static final Path root = Path.fromString("/controller/v1");
    private static final Path lockRoot = root.append("locks");
    private static final Path tenantRoot = root.append("tenants");
    private static final Path applicationRoot = root.append("applications");
    private static final Path jobRoot = root.append("jobs");
    private static final Path controllerRoot = root.append("controllers");
    private static final Path routingPoliciesRoot = root.append("routingPolicies");
    private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies");
    private static final Path endpointCertificateRoot = root.append("applicationCertificates");
    private static final Path archiveBucketsRoot = root.append("archiveBuckets");
    private static final Path changeRequestsRoot = root.append("changeRequests");
    private static final Path notificationsRoot = root.append("notifications");
    private static final Path supportAccessRoot = root.append("supportAccess");

    // Serializers converting between domain objects and their stored Slime/JSON form.
    private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer();
    private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer);
    private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer();
    private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
    private final TenantSerializer tenantSerializer = new TenantSerializer();
    private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer();
    private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer);
    private final OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer);
    private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer();
    private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer);
    private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer();
    private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer();
    private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
    private final RunSerializer runSerializer = new RunSerializer();
    private final RetriggerEntrySerializer retriggerEntrySerializer = new RetriggerEntrySerializer();
    private final NotificationsSerializer notificationsSerializer = new NotificationsSerializer();

    private final Curator curator;
    private final Duration tryLockTimeout;

    // Deserialization caches keyed by znode path, stamped with the znode's stat version:
    // a cached entry is reused as long as the stored version matches, so repeated reads
    // avoid re-parsing unchanged data. ConcurrentHashMap allows concurrent readers.
    private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>();
    private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>();

    @Inject
    public CuratorDb(Curator curator, ServiceRegistry services) {
        this(curator, defaultTryLockTimeout, services.zoneRegistry().system());
    }

    // NOTE(review): the 'system' parameter is not used in this constructor body — TODO confirm
    // whether it is still needed by callers or can be dropped.
    CuratorDb(Curator curator, Duration tryLockTimeout, SystemName system) {
        this.curator = curator;
        this.tryLockTimeout = tryLockTimeout;
    }

    /** Returns all hostnames configured to be part of this ZooKeeper cluster */
    public List<String> cluster() {
        // Connection spec is "host1:port1,host2:port2,..."; keep only the host parts.
        return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(","))
                     .filter(hostAndPort -> !hostAndPort.isEmpty())
                     .map(hostAndPort -> hostAndPort.split(":")[0])
                     .collect(Collectors.toUnmodifiableList());
    }

    // ---------- Locks ----------

    public Mutex lock(TenantName name) {
        return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2));
    }

    public Mutex lock(TenantAndApplicationId id) {
        return curator.lock(lockPath(id), defaultLockTimeout.multipliedBy(2));
    }

    public Mutex lockForDeployment(ApplicationId id, ZoneId zone) {
        return curator.lock(lockPath(id, zone), deployLockTimeout);
    }

    public Mutex lock(ApplicationId id, JobType type) {
        return curator.lock(lockPath(id, type), defaultLockTimeout);
    }

    public Mutex lock(ApplicationId id, JobType type, Step step) throws TimeoutException {
        return tryLock(lockPath(id, type, step));
    }

    public Mutex lockRotations() {
        return curator.lock(lockRoot.append("rotations"), defaultLockTimeout);
    }

    public Mutex lockConfidenceOverrides() {
        return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout);
    }

    public Mutex lockMaintenanceJob(String jobName) {
        // Wraps the checked TimeoutException: callers of maintenance-job locking expect an
        // unchecked failure when the lock is already held.
        try {
            return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName));
        } catch (TimeoutException e) {
            throw new UncheckedTimeoutException(e);
        }
    }

    public Mutex lockProvisionState(String provisionStateId) {
        return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1));
    }

    public Mutex lockOsVersions() {
        return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout);
    }

    public Mutex lockOsVersionStatus() {
        return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout);
    }

    public Mutex lockRoutingPolicies() {
        return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout);
    }

    public Mutex lockAuditLog() {
        return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout);
    }

    public Mutex lockNameServiceQueue() {
        return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout);
    }

    public Mutex lockMeteringRefreshTime() throws TimeoutException {
        return tryLock(lockRoot.append("meteringRefreshTime"));
    }

    public Mutex lockArchiveBuckets(ZoneId zoneId) {
        return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout);
    }

    public Mutex lockChangeRequests() {
        return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout);
    }

    public Mutex lockNotifications(TenantName tenantName) {
        return curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout);
    }

    public Mutex lockSupportAccess(DeploymentId deploymentId) {
        return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout);
    }

    public Mutex lockDeploymentRetriggerQueue() {
        return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout);
    }

    /** Try locking with a low timeout, meaning it is OK to fail lock acquisition.
     *
     * Useful for maintenance jobs, where there is no point in running the jobs back to back.
     */
    private Mutex tryLock(Path path) throws TimeoutException {
        try {
            return curator.lock(path, tryLockTimeout);
        } catch (UncheckedTimeoutException e) {
            // Translate to the checked form so callers must handle acquisition failure explicitly.
            throw new TimeoutException(e.getMessage());
        }
    }

    // ---------- Generic read/write helpers ----------

    /** Reads the data at path and maps it, treating empty (zero-length) data as absent. */
    private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
        return curator.getData(path).filter(data -> data.length > 0).map(mapper);
    }

    private Optional<Slime> readSlime(Path path) {
        return read(path, SlimeUtils::jsonToSlime);
    }

    private static byte[] asJson(Slime slime) {
        try {
            return SlimeUtils.toJsonBytes(slime);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    // ---------- Upgrader state ----------

    public double readUpgradesPerMinute() {
        // Stored as a raw big-endian double; 0.125 is the default when unset.
        return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125);
    }

    public void writeUpgradesPerMinute(double n) {
        curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
    }

    public void writeVersionStatus(VersionStatus status) {
        curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
    }

    public VersionStatus readVersionStatus() {
        return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
    }

    public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
        curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
    }

    public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
        return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
                                                  .orElseGet(Collections::emptyMap);
    }

    // ---------- Controller versions ----------

    public void writeControllerVersion(HostName hostname, ControllerVersion version) {
        curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version)));
    }

    public ControllerVersion readControllerVersion(HostName hostname) {
        return readSlime(controllerPath(hostname.value()))
                .map(controllerVersionSerializer::fromSlime)
                .orElse(ControllerVersion.CURRENT);
    }

    // ---------- OS versions ----------

    public void writeOsVersionTargets(Set<OsVersionTarget> versions) {
        curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions)));
    }

    public Set<OsVersionTarget> readOsVersionTargets() {
        return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet);
    }

    public void writeOsVersionStatus(OsVersionStatus status) {
        curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status)));
    }

    public OsVersionStatus readOsVersionStatus() {
        return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty);
    }

    // ---------- Tenants ----------

    public void writeTenant(Tenant tenant) {
        curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
    }

    public Optional<Tenant> readTenant(TenantName name) {
        return readSlime(tenantPath(name)).map(tenantSerializer::tenantFrom);
    }

    public List<Tenant> readTenants() {
        return readTenantNames().stream()
                                .map(this::readTenant)
                                .flatMap(Optional::stream)
                                .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
    }

    public List<TenantName> readTenantNames() {
        return curator.getChildren(tenantRoot).stream()
                      .map(TenantName::from)
                      .collect(Collectors.toList());
    }

    public void removeTenant(TenantName name) {
        curator.delete(tenantPath(name));
    }

    // ---------- Applications ----------

    public void writeApplication(Application application) {
        curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
    }

    /** Reads an application, re-deserializing only when the znode's stat version has changed since last read. */
    public Optional<Application> readApplication(TenantAndApplicationId application) {
        Path path = applicationPath(application);
        return curator.getStat(path)
                      .map(stat -> cachedApplications.compute(path, (__, old) ->
                              old != null && old.getFirst() == stat.getVersion()
                              ? old
                              : new Pair<>(stat.getVersion(), read(path, bytes -> applicationSerializer.fromSlime(bytes)).get())).getSecond());
    }

    public List<Application> readApplications(boolean canFail) {
        return readApplications(ignored -> true, canFail);
    }

    public List<Application> readApplications(TenantName name) {
        return readApplications(application -> application.tenant().equals(name), false);
    }

    /**
     * Reads all applications matching the filter.
     * When canFail is true, applications that fail to deserialize are logged and skipped
     * instead of aborting the whole read.
     */
    private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) {
        var applicationIds = readApplicationIds();
        var applications = new ArrayList<Application>(applicationIds.size());
        for (var id : applicationIds) {
            if (!applicationFilter.test(id)) continue;
            try {
                readApplication(id).ifPresent(applications::add);
            } catch (Exception e) {
                if (canFail) {
                    log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " +
                                          "manual intervention", e);
                } else {
                    throw e;
                }
            }
        }
        return Collections.unmodifiableList(applications);
    }

    public List<TenantAndApplicationId> readApplicationIds() {
        return curator.getChildren(applicationRoot).stream()
                      .map(TenantAndApplicationId::fromSerialized)
                      .sorted()
                      .collect(toUnmodifiableList());
    }

    public void removeApplication(TenantAndApplicationId id) {
        curator.delete(applicationPath(id));
    }

    // ---------- Job runs and logs ----------

    public void writeLastRun(Run run) {
        curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run)));
    }

    public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) {
        Path path = runsPath(id, type);
        curator.set(path, asJson(runSerializer.toSlime(runs)));
    }

    public Optional<Run> readLastRun(ApplicationId id, JobType type) {
        return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime);
    }

    /** Reads historic runs, using the same stat-version cache strategy as readApplication. */
    public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) {
        Path path = runsPath(id, type);
        return curator.getStat(path)
                      .map(stat -> cachedHistoricRuns.compute(path, (__, old) ->
                              old != null && old.getFirst() == stat.getVersion()
                              ? old
                              : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond())
                      .orElseGet(Collections::emptyNavigableMap);
    }

    public void deleteRunData(ApplicationId id, JobType type) {
        curator.delete(runsPath(id, type));
        curator.delete(lastRunPath(id, type));
    }

    public void deleteRunData(ApplicationId id) {
        curator.delete(jobRoot.append(id.serializedForm()));
    }

    public List<ApplicationId> applicationsWithJobs() {
        return curator.getChildren(jobRoot).stream()
                      .map(ApplicationId::fromSerializedForm)
                      .collect(Collectors.toList());
    }

    public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) {
        return curator.getData(logPath(id, type, chunkId));
    }

    public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) {
        curator.set(logPath(id, type, chunkId), log);
    }

    public void deleteLog(ApplicationId id, JobType type) {
        curator.delete(runsPath(id, type).append("logs"));
    }

    // The "logs" node's own data holds the id of the last log entry, while its children hold the chunks.
    public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) {
        return curator.getData(lastLogPath(id, type))
                      .map(String::new).map(Long::parseLong);
    }

    public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) {
        curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes());
    }

    public LongStream getLogChunkIds(ApplicationId id, JobType type) {
        return curator.getChildren(runsPath(id, type).append("logs")).stream()
                      .mapToLong(Long::parseLong)
                      .sorted();
    }

    // ---------- Audit log ----------

    public AuditLog readAuditLog() {
        return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime)
                                        .orElse(AuditLog.empty);
    }

    public void writeAuditLog(AuditLog log) {
        curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log)));
    }

    // ---------- Name service queue ----------

    public NameServiceQueue readNameServiceQueue() {
        return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime)
                                                .orElse(NameServiceQueue.EMPTY);
    }

    public void writeNameServiceQueue(NameServiceQueue queue) {
        curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue)));
    }

    // ---------- Provisioning (opaque state blobs) ----------

    @SuppressWarnings("unused")
    public Optional<byte[]> readProvisionState(String provisionId) {
        return curator.getData(provisionStatePath(provisionId));
    }

    @SuppressWarnings("unused")
    public void writeProvisionState(String provisionId, byte[] data) {
        curator.set(provisionStatePath(provisionId), data);
    }

    @SuppressWarnings("unused")
    public List<String> readProvisionStateIds() {
        return curator.getChildren(provisionStatePath());
    }

    // ---------- Routing policies ----------

    /** Writes the routing policies of an application, first verifying that they all belong to it. */
    public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) {
        for (var policy : policies) {
            if (!policy.id().owner().equals(application)) {
                throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " +
                                                   application.toShortString());
            }
        }
        curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies)));
    }

    public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() {
        return readRoutingPolicies((instance) -> true);
    }

    public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) {
        return curator.getChildren(routingPoliciesRoot).stream()
                      .map(ApplicationId::fromSerializedForm)
                      .filter(filter)
                      .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies));
    }

    public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) {
        return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime))
                                                        .orElseGet(List::of);
    }

    public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) {
        curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy)));
    }

    public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) {
        return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data))
                                                     .orElseGet(() -> new ZoneRoutingPolicy(zone, RoutingStatus.DEFAULT));
    }

    // ---------- Endpoint certificates ----------

    public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) {
        curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata)));
    }

    public void deleteEndpointCertificateMetadata(ApplicationId applicationId) {
        curator.delete(endpointCertificatePath(applicationId));
    }

    public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) {
        return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString);
    }

    /**
     * Reads the certificate metadata of every application that has a child node under the
     * certificate root. Throws (via orElseThrow) if a listed child unexpectedly has no metadata.
     */
    public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() {
        Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>();
        for (String appIdString : curator.getChildren(endpointCertificateRoot)) {
            ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString);
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId);
            allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow());
        }
        return allEndpointCertificateMetadata;
    }

    // ---------- Metering ----------

    public void writeMeteringRefreshTime(long timestamp) {
        curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes());
    }

    public long readMeteringRefreshTime() {
        return curator.getData(meteringRefreshPath())
                      .map(String::new).map(Long::parseLong)
                      .orElse(0L);
    }

    // ---------- Archive buckets ----------

    public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) {
        return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString)
                      .orElseGet(Set::of);
    }

    public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) {
        curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets)));
    }

    // ---------- Change requests ----------

    public Optional<VespaChangeRequest> readChangeRequest(String changeRequestId) {
        return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime);
    }

    public List<VespaChangeRequest> readChangeRequests() {
        return curator.getChildren(changeRequestsRoot)
                      .stream()
                      .map(this::readChangeRequest)
                      .flatMap(Optional::stream)
                      .collect(Collectors.toList());
    }

    public void writeChangeRequest(VespaChangeRequest changeRequest) {
        curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest)));
    }

    public void deleteChangeRequest(VespaChangeRequest changeRequest) {
        curator.delete(changeRequestPath(changeRequest.getId()));
    }

    // ---------- Notifications ----------

    public List<Notification> readNotifications(TenantName tenantName) {
        return readSlime(notificationsPath(tenantName))
                .map(slime -> notificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of);
    }

    public List<TenantName> listTenantsWithNotifications() {
        return curator.getChildren(notificationsRoot).stream()
                      .map(TenantName::from)
                      .collect(Collectors.toUnmodifiableList());
    }

    public void writeNotifications(TenantName tenantName, List<Notification> notifications) {
        curator.set(notificationsPath(tenantName), asJson(notificationsSerializer.toSlime(notifications)));
    }

    public void deleteNotifications(TenantName tenantName) {
        curator.delete(notificationsPath(tenantName));
    }

    // ---------- Support access ----------

    public SupportAccess readSupportAccess(DeploymentId deploymentId) {
        return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY);
    }

    /** Take lock before reading before writing */
    public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) {
        curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess)));
    }

    // ---------- Deployment re-trigger queue ----------

    public List<RetriggerEntry> readRetriggerEntries() {
        return readSlime(deploymentRetriggerPath()).map(retriggerEntrySerializer::fromSlime).orElseGet(List::of);
    }

    public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) {
        curator.set(deploymentRetriggerPath(), asJson(retriggerEntrySerializer.toSlime(retriggerEntries)));
    }

    // ---------- Lock-path helpers ----------

    private Path lockPath(TenantName tenant) {
        return lockRoot
                .append(tenant.value());
    }

    private Path lockPath(TenantAndApplicationId application) {
        return lockRoot.append(application.tenant().value() + ":" + application.application().value());
    }

    private Path lockPath(ApplicationId instance, ZoneId zone) {
        return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value());
    }

    private Path lockPath(ApplicationId instance, JobType type) {
        return lockRoot.append(instance.serializedForm() + ":" + type.jobName());
    }

    private Path lockPath(ApplicationId instance, JobType type, Step step) {
        return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name());
    }

    private Path lockPath(String provisionId) {
        return lockRoot
                .append(provisionStatePath())
                .append(provisionId);
    }

    // ---------- Data-path helpers ----------

    private static Path upgradesPerMinutePath() {
        return root.append("upgrader").append("upgradesPerMinute");
    }

    private static Path confidenceOverridesPath() {
        return root.append("upgrader").append("confidenceOverrides");
    }

    private static Path osVersionTargetsPath() {
        return root.append("osUpgrader").append("targetVersion");
    }

    private static Path osVersionStatusPath() {
        return root.append("osVersionStatus");
    }

    private static Path versionStatusPath() {
        return root.append("versionStatus");
    }

    private static Path routingPolicyPath(ApplicationId application) {
        return routingPoliciesRoot.append(application.serializedForm());
    }

    private static Path zoneRoutingPolicyPath(ZoneId zone) {
        return zoneRoutingPoliciesRoot.append(zone.value());
    }

    private static Path nameServiceQueuePath() {
        return root.append("nameServiceQueue");
    }

    private static Path auditLogPath() {
        return root.append("auditLog");
    }

    private static Path provisionStatePath() {
        return root.append("provisioning").append("states");
    }

    private static Path provisionStatePath(String provisionId) {
        return provisionStatePath().append(provisionId);
    }

    private static Path tenantPath(TenantName name) {
        return tenantRoot.append(name.value());
    }

    private static Path applicationPath(TenantAndApplicationId id) {
        return applicationRoot.append(id.serialized());
    }

    private static Path runsPath(ApplicationId id, JobType type) {
        return jobRoot.append(id.serializedForm()).append(type.jobName());
    }

    private static Path lastRunPath(ApplicationId id, JobType type) {
        return runsPath(id, type).append("last");
    }

    private static Path logPath(ApplicationId id, JobType type, long first) {
        return runsPath(id, type).append("logs").append(Long.toString(first));
    }

    // Same node as the "logs" directory: its data holds the last entry id, its children the chunks.
    private static Path lastLogPath(ApplicationId id, JobType type) {
        return runsPath(id, type).append("logs");
    }

    private static Path controllerPath(String hostname) {
        return controllerRoot.append(hostname);
    }

    private static Path endpointCertificatePath(ApplicationId id) {
        return endpointCertificateRoot.append(id.serializedForm());
    }

    private static Path meteringRefreshPath() {
        return root.append("meteringRefreshTime");
    }

    private static Path archiveBucketsPath(ZoneId zoneId) {
        return archiveBucketsRoot.append(zoneId.value());
    }

    private static Path changeRequestPath(String id) {
        return changeRequestsRoot.append(id);
    }

    private static Path notificationsPath(TenantName tenantName) {
        return notificationsRoot.append(tenantName.value());
    }

    private static Path supportAccessPath(DeploymentId deploymentId) {
        return supportAccessRoot.append(deploymentId.dottedString());
    }

    private static Path deploymentRetriggerPath() {
        return root.append("deploymentRetriggerQueue");
    }

}
As far as I can see, any bundles started here are never uninstalled — i.e. `bundle.uninstall()` is never called on them.
/**
 * Waits for the next config generation, builds and constructs the corresponding component
 * graph, and returns it together with a task that deconstructs the previous generation.
 *
 * @param oldGraph         the currently active component graph
 * @param fallbackInjector injector used for components not covered by the graph
 * @param isInitializing   whether this is the initial graph construction
 * @return the new graph plus a cleanup task for components/bundles made obsolete by it
 */
public ComponentGraphResult waitForNextGraphGeneration(ComponentGraph oldGraph, Injector fallbackInjector, boolean isInitializing) {
    try {
        ComponentGraph newGraph;
        Collection<Bundle> obsoleteBundles = new HashSet<>();
        try {
            newGraph = waitForNewConfigGenAndCreateGraph(oldGraph, fallbackInjector, isInitializing, obsoleteBundles);
            newGraph.reuseNodes(oldGraph);
        } catch (Throwable t) {
            log.warning("Failed to set up component graph - uninstalling latest bundles. Bootstrap generation: " + getBootstrapGeneration());
            // FIX: completeBundleGeneration(FAILURE) returns the bundles that were installed for
            // the failed generation. Previously the return value was discarded, so those started
            // bundles were never uninstalled; hand them to the deconstructor instead.
            Collection<Bundle> newBundlesFromFailedGen = osgi.completeBundleGeneration(Osgi.GenerationStatus.FAILURE);
            destructor.deconstruct(getBootstrapGeneration(), List.of(), newBundlesFromFailedGen);
            throw t;
        }
        try {
            constructComponents(newGraph);
        } catch (Throwable e) {
            log.warning("Failed to construct components for generation '" + newGraph.generation() + "' - scheduling partial graph for deconstruction");
            // Components already constructed for the failed graph (and its new bundles) must be
            // deconstructed, except those shared with the still-active old graph.
            Collection<Bundle> newBundlesFromFailedGen = osgi.completeBundleGeneration(Osgi.GenerationStatus.FAILURE);
            deconstructFailedGraph(oldGraph, newGraph, newBundlesFromFailedGen);
            throw e;
        }
        osgi.completeBundleGeneration(Osgi.GenerationStatus.SUCCESS);
        // Deconstruction of the previous generation is deferred to the caller via this task.
        Runnable cleanupTask = createPreviousGraphDeconstructionTask(oldGraph, newGraph, obsoleteBundles);
        return new ComponentGraphResult(newGraph, cleanupTask);
    } catch (Throwable t) {
        // Bump leastGeneration past the failed one so we do not retry the same generation.
        invalidateGeneration(oldGraph.generation(), t);
        throw t;
    }
}
}
/**
 * Waits for the next config generation, builds and constructs the corresponding component
 * graph, and returns it together with a task that deconstructs the previous generation.
 * Bundles installed for a generation that fails are handed back to the deconstructor so
 * they get uninstalled rather than leaked.
 *
 * @param oldGraph         the currently active component graph
 * @param fallbackInjector injector used for components not covered by the graph
 * @param isInitializing   whether this is the initial graph construction
 * @return the new graph plus a cleanup task for components/bundles made obsolete by it
 */
public ComponentGraphResult waitForNextGraphGeneration(ComponentGraph oldGraph, Injector fallbackInjector, boolean isInitializing) {
    try {
        ComponentGraph newGraph;
        Collection<Bundle> obsoleteBundles = new HashSet<>();
        try {
            newGraph = waitForNewConfigGenAndCreateGraph(oldGraph, fallbackInjector, isInitializing, obsoleteBundles);
            newGraph.reuseNodes(oldGraph);
        } catch (Throwable t) {
            log.warning("Failed to set up component graph - uninstalling latest bundles. Bootstrap generation: " + getBootstrapGeneration());
            // completeBundleGeneration(FAILURE) returns the bundles installed for the failed
            // generation; schedule them for deconstruction (no components constructed yet).
            Collection<Bundle> newBundlesFromFailedGen = osgi.completeBundleGeneration(Osgi.GenerationStatus.FAILURE);
            deconstructComponentsAndBundles(getBootstrapGeneration(), newBundlesFromFailedGen, List.of());
            throw t;
        }
        try {
            constructComponents(newGraph);
        } catch (Throwable e) {
            log.warning("Failed to construct components for generation '" + newGraph.generation() + "' - scheduling partial graph for deconstruction");
            // Deconstruct the partially constructed graph, sparing components shared with the
            // still-active old graph.
            Collection<Bundle> newBundlesFromFailedGen = osgi.completeBundleGeneration(Osgi.GenerationStatus.FAILURE);
            deconstructFailedGraph(oldGraph, newGraph, newBundlesFromFailedGen);
            throw e;
        }
        osgi.completeBundleGeneration(Osgi.GenerationStatus.SUCCESS);
        // Deconstruction of the previous generation is deferred to the caller via this task.
        Runnable cleanupTask = createPreviousGraphDeconstructionTask(oldGraph, newGraph, obsoleteBundles);
        return new ComponentGraphResult(newGraph, cleanupTask);
    } catch (Throwable t) {
        // Bump leastGeneration past the failed one so we do not retry the same generation.
        invalidateGeneration(oldGraph.generation(), t);
        throw t;
    }
}
/**
 * Drives the component-graph lifecycle for a container: subscribes to bootstrap and
 * components configs, installs platform/application bundles via {@link Osgi}, builds and
 * wires {@link ComponentGraph}s, and schedules deconstruction of obsolete components.
 * NOTE(review): assumed single-threaded use of the mutable generation fields — confirm
 * with callers.
 */
class Container {

    private static final Logger log = Logger.getLogger(Container.class.getName());

    private final SubscriberFactory subscriberFactory;
    private final ConfigKey<ApplicationBundlesConfig> applicationBundlesConfigKey;
    private final ConfigKey<PlatformBundlesConfig> platformBundlesConfigKey;
    private final ConfigKey<ComponentsConfig> componentsConfigKey;
    private final ComponentDeconstructor destructor;
    private final Osgi osgi;
    private final ConfigRetriever retriever;

    // Platform bundle paths from the first bootstrap generation; later generations must match.
    private List<String> platformBundles;
    // Config generation of the most recently created graph; -1 before any graph exists.
    private long previousConfigGeneration = -1L;
    // Lower bound passed to the retriever so failed generations are not retried.
    private long leastGeneration = -1L;

    public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor destructor, Osgi osgi) {
        this.subscriberFactory = subscriberFactory;
        this.destructor = destructor;
        this.osgi = osgi;
        applicationBundlesConfigKey = new ConfigKey<>(ApplicationBundlesConfig.class, configId);
        platformBundlesConfigKey = new ConfigKey<>(PlatformBundlesConfig.class, configId);
        componentsConfigKey = new ConfigKey<>(ComponentsConfig.class, configId);
        var bootstrapKeys = Set.of(applicationBundlesConfigKey, platformBundlesConfigKey, componentsConfigKey);
        this.retriever = new ConfigRetriever(bootstrapKeys, subscriberFactory);
    }

    /** Constructs every component instance in the graph, aborting promptly if interrupted. */
    private void constructComponents(ComponentGraph graph) {
        graph.nodes().forEach(n -> {
            if (Thread.interrupted())
                throw new UncheckedInterruptedException("Interrupted while constructing component graph", true);
            n.constructInstance();
        });
    }

    /**
     * Loops until a components-config snapshot arrives, handling any intermediate bootstrap
     * snapshots (bundle installation + intermediate graph) on the way.
     *
     * @param obsoleteBundles out-parameter collecting bundles made obsolete by new application bundles
     */
    private ComponentGraph waitForNewConfigGenAndCreateGraph(
            ComponentGraph graph, Injector fallbackInjector, boolean isInitializing, Collection<Bundle> obsoleteBundles) {
        ConfigSnapshot snapshot;
        while (true) {
            snapshot = retriever.getConfigs(graph.configKeys(), leastGeneration, isInitializing);
            if (log.isLoggable(FINE))
                log.log(FINE, String.format("getConfigAndCreateGraph:\n" + "graph.configKeys = %s\n" + "graph.generation = %s\n" + "snapshot = %s\n", graph.configKeys(), graph.generation(), snapshot));
            if (snapshot instanceof BootstrapConfigs) {
                // Bootstrap generations must be strictly increasing.
                if (getBootstrapGeneration() <= previousConfigGeneration) {
                    throw new IllegalStateException(String.format(
                            "Got bootstrap configs out of sequence for old config generation %d.\n" + "Previous config generation is %d",
                            getBootstrapGeneration(), previousConfigGeneration));
                }
                log.log(FINE, () -> "Got new bootstrap generation\n" + configGenerationsString());
                if (graph.generation() == 0) {
                    // First generation: install the platform bundles once.
                    platformBundles = getConfig(platformBundlesConfigKey, snapshot.configs()).bundlePaths();
                    osgi.installPlatformBundles(platformBundles);
                } else {
                    throwIfPlatformBundlesChanged(snapshot);
                }
                Collection<Bundle> bundlesToRemove = installApplicationBundles(snapshot.configs());
                obsoleteBundles.addAll(bundlesToRemove);
                graph = createComponentGraph(snapshot.configs(), getBootstrapGeneration(), fallbackInjector);
                // Continue the loop: a components snapshot for the new bundles is still expected.
            } else if (snapshot instanceof ComponentsConfigs) {
                break;
            }
        }
        log.log(FINE, () -> "Got components configs,\n" + configGenerationsString());
        return createAndConfigureComponentGraph(snapshot.configs(), fallbackInjector);
    }

    private long getBootstrapGeneration() {
        return retriever.getBootstrapGeneration();
    }

    private long getComponentsGeneration() {
        return retriever.getComponentsGeneration();
    }

    private String configGenerationsString() {
        return String.format("bootstrap generation = %d\n" + "components generation: %d\n" + "previous generation: %d",
                             getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration);
    }

    /** Platform bundles are fixed after the first generation; any change is fatal. */
    private void throwIfPlatformBundlesChanged(ConfigSnapshot snapshot) {
        var checkPlatformBundles = getConfig(platformBundlesConfigKey, snapshot.configs()).bundlePaths();
        if (! checkPlatformBundles.equals(platformBundles))
            throw new RuntimeException("Platform bundles are not allowed to change!\nOld: " + platformBundles + "\nNew: " + checkPlatformBundles);
    }

    private ComponentGraph createAndConfigureComponentGraph(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> componentsConfigs, Injector fallbackInjector) {
        ComponentGraph componentGraph = createComponentGraph(componentsConfigs, getComponentsGeneration(), fallbackInjector);
        componentGraph.setAvailableConfigs(componentsConfigs);
        return componentGraph;
    }

    /**
     * Deconstructs the components of a graph whose construction failed, sparing the
     * components it shares (by identity) with the still-active current graph.
     */
    private void deconstructFailedGraph(ComponentGraph currentGraph, ComponentGraph failedGraph, Collection<Bundle> bundlesFromFailedGraph) {
        // Identity set: component equality must not merge distinct instances here.
        Set<Object> currentComponents = Collections.newSetFromMap(new IdentityHashMap<>(currentGraph.size()));
        currentComponents.addAll(currentGraph.allConstructedComponentsAndProviders());
        List<Object> unusedComponents = new ArrayList<>();
        for (Object component : failedGraph.allConstructedComponentsAndProviders()) {
            if (!currentComponents.contains(component)) unusedComponents.add(component);
        }
        destructor.deconstruct(failedGraph.generation(), unusedComponents, bundlesFromFailedGraph);
    }

    /** Returns a deferred task that deconstructs old-graph components not reused by the new graph. */
    private Runnable createPreviousGraphDeconstructionTask(ComponentGraph oldGraph, ComponentGraph newGraph, Collection<Bundle> obsoleteBundles) {
        // Identity map used as a set, to compare component instances by reference.
        Map<Object, ?> newComponents = new IdentityHashMap<>(newGraph.size());
        for (Object component : newGraph.allConstructedComponentsAndProviders())
            newComponents.put(component, null);
        List<Object> obsoleteComponents = new ArrayList<>();
        for (Object component : oldGraph.allConstructedComponentsAndProviders())
            if ( ! newComponents.containsKey(component)) obsoleteComponents.add(component);
        return () -> destructor.deconstruct(oldGraph.generation(), obsoleteComponents, obsoleteBundles);
    }

    /** Installs the application bundles from config; returns the bundles they replace. */
    private Set<Bundle> installApplicationBundles(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs) {
        ApplicationBundlesConfig applicationBundlesConfig = getConfig(applicationBundlesConfigKey, configsIncludingBootstrapConfigs);
        return osgi.useApplicationBundles(applicationBundlesConfig.bundles(), getBootstrapGeneration());
    }

    /** Builds (but does not construct) a component graph for the given generation. */
    private ComponentGraph createComponentGraph(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs, long generation, Injector fallbackInjector) {
        previousConfigGeneration = generation;
        ComponentGraph graph = new ComponentGraph(generation);
        ComponentsConfig componentsConfig = getConfig(componentsConfigKey, configsIncludingBootstrapConfigs);
        if (componentsConfig == null) {
            throw new ConfigurationRuntimeException("The set of all configs does not include a valid 'components' config. Config set: " + configsIncludingBootstrapConfigs.keySet());
        }
        addNodes(componentsConfig, graph);
        injectNodes(componentsConfig, graph);
        graph.complete(fallbackInjector);
        return graph;
    }

    /** Adds one node per configured component, resolving its class through OSGi. */
    private void addNodes(ComponentsConfig componentsConfig, ComponentGraph graph) {
        for (ComponentsConfig.Components config : componentsConfig.components()) {
            BundleInstantiationSpecification specification = bundleInstantiationSpecification(config);
            Class<?> componentClass = osgi.resolveClass(specification);
            Node componentNode = new ComponentNode(specification.id, config.configId(), componentClass, null);
            graph.add(componentNode);
        }
    }

    /** Wires explicit inject() declarations between already-added nodes. */
    private void injectNodes(ComponentsConfig config, ComponentGraph graph) {
        for (ComponentsConfig.Components component : config.components()) {
            Node componentNode = ComponentGraph.getNode(graph, component.id());
            for (ComponentsConfig.Components.Inject inject : component.inject()) {
                componentNode.inject(ComponentGraph.getNode(graph, inject.id()));
            }
        }
    }

    /** Advances leastGeneration past the failed generation and logs unless the cause is a shutdown/interrupt. */
    private void invalidateGeneration(long generation, Throwable cause) {
        leastGeneration = Math.max(retriever.getComponentsGeneration(), retriever.getBootstrapGeneration()) + 1;
        if (!(cause instanceof InterruptedException) && !(cause instanceof ConfigInterruptedException) && !(cause instanceof SubscriberClosedException)) {
            log.log(Level.WARNING, newGraphErrorMessage(generation, cause), cause);
        }
    }

    private static String newGraphErrorMessage(long generation, Throwable cause) {
        String failedFirstMessage = "Failed to set up first component graph";
        String failedNewMessage = "Failed to set up new component graph";
        String constructMessage = " due to error when constructing one of the components";
        String retainMessage = ". Retaining previous component generation.";
        if (generation == 0) {
            if (cause instanceof ComponentNode.ComponentConstructorException) {
                return failedFirstMessage + constructMessage;
            } else {
                return failedFirstMessage;
            }
        } else {
            if (cause instanceof ComponentNode.ComponentConstructorException) {
                return failedNewMessage + constructMessage + retainMessage;
            } else {
                return failedNewMessage + retainMessage;
            }
        }
    }

    /** Shuts down config subscription, then deconstructs the given graph (if any) and the destructor. */
    public void shutdown(ComponentGraph graph) {
        shutdownConfigRetriever();
        if (graph != null) {
            scheduleGraphForDeconstruction(graph);
            destructor.shutdown();
        }
    }

    public void shutdownConfigRetriever() {
        retriever.shutdown();
    }

    public void reloadConfig(long generation) {
        subscriberFactory.reloadActiveSubscribers(generation);
    }

    private void scheduleGraphForDeconstruction(ComponentGraph graph) {
        destructor.deconstruct(graph.generation(), graph.allConstructedComponentsAndProviders(), List.of());
    }

    /** Looks up a config by key, failing loudly if it is absent. */
    public static <T extends ConfigInstance> T getConfig(ConfigKey<T> key, Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configs) {
        ConfigInstance inst = configs.get(key);
        if (inst == null || key.getConfigClass() == null) {
            throw new RuntimeException("Missing config " + key);
        }
        return key.getConfigClass().cast(inst);
    }

    private static BundleInstantiationSpecification bundleInstantiationSpecification(ComponentsConfig.Components config) {
        return BundleInstantiationSpecification.fromStrings(config.id(), config.classId(), config.bundle());
    }

    /** A freshly built graph paired with the deferred cleanup of the generation it replaces. */
    public static class ComponentGraphResult {

        private final ComponentGraph newGraph;
        private final Runnable oldComponentsCleanupTask;

        public ComponentGraphResult(ComponentGraph newGraph, Runnable oldComponentsCleanupTask) {
            this.newGraph = newGraph;
            this.oldComponentsCleanupTask = oldComponentsCleanupTask;
        }

        public ComponentGraph newGraph() { return newGraph; }

        public Runnable oldComponentsCleanupTask() { return oldComponentsCleanupTask; }
    }
}
/**
 * Drives the component-graph lifecycle for a container: subscribes to bootstrap and
 * components configs, installs platform/application bundles via {@link Osgi}, builds and
 * wires {@link ComponentGraph}s, and routes all component/bundle teardown through a single
 * {@code deconstructComponentsAndBundles} helper.
 * NOTE(review): assumed single-threaded use of the mutable generation fields — confirm
 * with callers.
 */
class Container {

    private static final Logger log = Logger.getLogger(Container.class.getName());

    private final SubscriberFactory subscriberFactory;
    private final ConfigKey<ApplicationBundlesConfig> applicationBundlesConfigKey;
    private final ConfigKey<PlatformBundlesConfig> platformBundlesConfigKey;
    private final ConfigKey<ComponentsConfig> componentsConfigKey;
    private final ComponentDeconstructor destructor;
    private final Osgi osgi;
    private final ConfigRetriever retriever;

    // Platform bundle paths from the first bootstrap generation; later generations must match.
    private List<String> platformBundles;
    // Config generation of the most recently created graph; -1 before any graph exists.
    private long previousConfigGeneration = -1L;
    // Lower bound passed to the retriever so failed generations are not retried.
    private long leastGeneration = -1L;

    public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor destructor, Osgi osgi) {
        this.subscriberFactory = subscriberFactory;
        this.destructor = destructor;
        this.osgi = osgi;
        applicationBundlesConfigKey = new ConfigKey<>(ApplicationBundlesConfig.class, configId);
        platformBundlesConfigKey = new ConfigKey<>(PlatformBundlesConfig.class, configId);
        componentsConfigKey = new ConfigKey<>(ComponentsConfig.class, configId);
        var bootstrapKeys = Set.of(applicationBundlesConfigKey, platformBundlesConfigKey, componentsConfigKey);
        this.retriever = new ConfigRetriever(bootstrapKeys, subscriberFactory);
    }

    /** Constructs every component instance in the graph, aborting promptly if interrupted. */
    private void constructComponents(ComponentGraph graph) {
        graph.nodes().forEach(n -> {
            if (Thread.interrupted())
                throw new UncheckedInterruptedException("Interrupted while constructing component graph", true);
            n.constructInstance();
        });
    }

    /**
     * Loops until a components-config snapshot arrives, handling any intermediate bootstrap
     * snapshots (bundle installation + intermediate graph) on the way.
     *
     * @param obsoleteBundles out-parameter collecting bundles made obsolete by new application bundles
     */
    private ComponentGraph waitForNewConfigGenAndCreateGraph(
            ComponentGraph graph, Injector fallbackInjector, boolean isInitializing, Collection<Bundle> obsoleteBundles) {
        ConfigSnapshot snapshot;
        while (true) {
            snapshot = retriever.getConfigs(graph.configKeys(), leastGeneration, isInitializing);
            if (log.isLoggable(FINE))
                log.log(FINE, String.format("getConfigAndCreateGraph:\n" + "graph.configKeys = %s\n" + "graph.generation = %s\n" + "snapshot = %s\n", graph.configKeys(), graph.generation(), snapshot));
            if (snapshot instanceof BootstrapConfigs) {
                // Bootstrap generations must be strictly increasing.
                if (getBootstrapGeneration() <= previousConfigGeneration) {
                    throw new IllegalStateException(String.format(
                            "Got bootstrap configs out of sequence for old config generation %d.\n" + "Previous config generation is %d",
                            getBootstrapGeneration(), previousConfigGeneration));
                }
                log.log(FINE, () -> "Got new bootstrap generation\n" + configGenerationsString());
                if (graph.generation() == 0) {
                    // First generation: install the platform bundles once.
                    platformBundles = getConfig(platformBundlesConfigKey, snapshot.configs()).bundlePaths();
                    osgi.installPlatformBundles(platformBundles);
                } else {
                    throwIfPlatformBundlesChanged(snapshot);
                }
                Collection<Bundle> bundlesToRemove = installApplicationBundles(snapshot.configs());
                obsoleteBundles.addAll(bundlesToRemove);
                graph = createComponentGraph(snapshot.configs(), getBootstrapGeneration(), fallbackInjector);
                // Continue the loop: a components snapshot for the new bundles is still expected.
            } else if (snapshot instanceof ComponentsConfigs) {
                break;
            }
        }
        log.log(FINE, () -> "Got components configs,\n" + configGenerationsString());
        return createAndConfigureComponentGraph(snapshot.configs(), fallbackInjector);
    }

    private long getBootstrapGeneration() {
        return retriever.getBootstrapGeneration();
    }

    private long getComponentsGeneration() {
        return retriever.getComponentsGeneration();
    }

    private String configGenerationsString() {
        return String.format("bootstrap generation = %d\n" + "components generation: %d\n" + "previous generation: %d",
                             getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration);
    }

    /** Platform bundles are fixed after the first generation; any change is fatal. */
    private void throwIfPlatformBundlesChanged(ConfigSnapshot snapshot) {
        var checkPlatformBundles = getConfig(platformBundlesConfigKey, snapshot.configs()).bundlePaths();
        if (! checkPlatformBundles.equals(platformBundles))
            throw new RuntimeException("Platform bundles are not allowed to change!\nOld: " + platformBundles + "\nNew: " + checkPlatformBundles);
    }

    private ComponentGraph createAndConfigureComponentGraph(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> componentsConfigs, Injector fallbackInjector) {
        ComponentGraph componentGraph = createComponentGraph(componentsConfigs, getComponentsGeneration(), fallbackInjector);
        componentGraph.setAvailableConfigs(componentsConfigs);
        return componentGraph;
    }

    /**
     * Deconstructs the components of a graph whose construction failed, sparing the
     * components it shares (by identity) with the still-active current graph.
     */
    private void deconstructFailedGraph(ComponentGraph currentGraph, ComponentGraph failedGraph, Collection<Bundle> bundlesFromFailedGraph) {
        // Identity set: component equality must not merge distinct instances here.
        Set<Object> currentComponents = Collections.newSetFromMap(new IdentityHashMap<>(currentGraph.size()));
        currentComponents.addAll(currentGraph.allConstructedComponentsAndProviders());
        List<Object> unusedComponents = new ArrayList<>();
        for (Object component : failedGraph.allConstructedComponentsAndProviders()) {
            if (!currentComponents.contains(component)) unusedComponents.add(component);
        }
        deconstructComponentsAndBundles(failedGraph.generation(), bundlesFromFailedGraph, unusedComponents);
    }

    /** Single funnel for handing components and bundles of a generation to the deconstructor. */
    private void deconstructComponentsAndBundles(long generation, Collection<Bundle> bundlesFromFailedGraph, List<Object> unusedComponents) {
        destructor.deconstruct(generation, unusedComponents, bundlesFromFailedGraph);
    }

    /** Returns a deferred task that deconstructs old-graph components not reused by the new graph. */
    private Runnable createPreviousGraphDeconstructionTask(ComponentGraph oldGraph, ComponentGraph newGraph, Collection<Bundle> obsoleteBundles) {
        // Identity map used as a set, to compare component instances by reference.
        Map<Object, ?> newComponents = new IdentityHashMap<>(newGraph.size());
        for (Object component : newGraph.allConstructedComponentsAndProviders())
            newComponents.put(component, null);
        List<Object> obsoleteComponents = new ArrayList<>();
        for (Object component : oldGraph.allConstructedComponentsAndProviders())
            if ( ! newComponents.containsKey(component)) obsoleteComponents.add(component);
        return () -> destructor.deconstruct(oldGraph.generation(), obsoleteComponents, obsoleteBundles);
    }

    /** Installs the application bundles from config; returns the bundles they replace. */
    private Set<Bundle> installApplicationBundles(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs) {
        ApplicationBundlesConfig applicationBundlesConfig = getConfig(applicationBundlesConfigKey, configsIncludingBootstrapConfigs);
        return osgi.useApplicationBundles(applicationBundlesConfig.bundles(), getBootstrapGeneration());
    }

    /** Builds (but does not construct) a component graph for the given generation. */
    private ComponentGraph createComponentGraph(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs, long generation, Injector fallbackInjector) {
        previousConfigGeneration = generation;
        ComponentGraph graph = new ComponentGraph(generation);
        ComponentsConfig componentsConfig = getConfig(componentsConfigKey, configsIncludingBootstrapConfigs);
        if (componentsConfig == null) {
            throw new ConfigurationRuntimeException("The set of all configs does not include a valid 'components' config. Config set: " + configsIncludingBootstrapConfigs.keySet());
        }
        addNodes(componentsConfig, graph);
        injectNodes(componentsConfig, graph);
        graph.complete(fallbackInjector);
        return graph;
    }

    /** Adds one node per configured component, resolving its class through OSGi. */
    private void addNodes(ComponentsConfig componentsConfig, ComponentGraph graph) {
        for (ComponentsConfig.Components config : componentsConfig.components()) {
            BundleInstantiationSpecification specification = bundleInstantiationSpecification(config);
            Class<?> componentClass = osgi.resolveClass(specification);
            Node componentNode = new ComponentNode(specification.id, config.configId(), componentClass, null);
            graph.add(componentNode);
        }
    }

    /** Wires explicit inject() declarations between already-added nodes. */
    private void injectNodes(ComponentsConfig config, ComponentGraph graph) {
        for (ComponentsConfig.Components component : config.components()) {
            Node componentNode = ComponentGraph.getNode(graph, component.id());
            for (ComponentsConfig.Components.Inject inject : component.inject()) {
                componentNode.inject(ComponentGraph.getNode(graph, inject.id()));
            }
        }
    }

    /** Advances leastGeneration past the failed generation and logs unless the cause is a shutdown/interrupt. */
    private void invalidateGeneration(long generation, Throwable cause) {
        leastGeneration = Math.max(retriever.getComponentsGeneration(), retriever.getBootstrapGeneration()) + 1;
        if (!(cause instanceof InterruptedException) && !(cause instanceof ConfigInterruptedException) && !(cause instanceof SubscriberClosedException)) {
            log.log(Level.WARNING, newGraphErrorMessage(generation, cause), cause);
        }
    }

    private static String newGraphErrorMessage(long generation, Throwable cause) {
        String failedFirstMessage = "Failed to set up first component graph";
        String failedNewMessage = "Failed to set up new component graph";
        String constructMessage = " due to error when constructing one of the components";
        String retainMessage = ". Retaining previous component generation.";
        if (generation == 0) {
            if (cause instanceof ComponentNode.ComponentConstructorException) {
                return failedFirstMessage + constructMessage;
            } else {
                return failedFirstMessage;
            }
        } else {
            if (cause instanceof ComponentNode.ComponentConstructorException) {
                return failedNewMessage + constructMessage + retainMessage;
            } else {
                return failedNewMessage + retainMessage;
            }
        }
    }

    /** Shuts down config subscription, then deconstructs the given graph (if any) and the destructor. */
    public void shutdown(ComponentGraph graph) {
        shutdownConfigRetriever();
        if (graph != null) {
            deconstructComponentsAndBundles(graph.generation(), List.of(), graph.allConstructedComponentsAndProviders());
            destructor.shutdown();
        }
    }

    public void shutdownConfigRetriever() {
        retriever.shutdown();
    }

    public void reloadConfig(long generation) {
        subscriberFactory.reloadActiveSubscribers(generation);
    }

    /** Looks up a config by key, failing loudly if it is absent. */
    public static <T extends ConfigInstance> T getConfig(ConfigKey<T> key, Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configs) {
        ConfigInstance inst = configs.get(key);
        if (inst == null || key.getConfigClass() == null) {
            throw new RuntimeException("Missing config " + key);
        }
        return key.getConfigClass().cast(inst);
    }

    private static BundleInstantiationSpecification bundleInstantiationSpecification(ComponentsConfig.Components config) {
        return BundleInstantiationSpecification.fromStrings(config.id(), config.classId(), config.bundle());
    }

    /** A freshly built graph paired with the deferred cleanup of the generation it replaces. */
    public static class ComponentGraphResult {

        private final ComponentGraph newGraph;
        private final Runnable oldComponentsCleanupTask;

        public ComponentGraphResult(ComponentGraph newGraph, Runnable oldComponentsCleanupTask) {
            this.newGraph = newGraph;
            this.oldComponentsCleanupTask = oldComponentsCleanupTask;
        }

        public ComponentGraph newGraph() { return newGraph; }

        public Runnable oldComponentsCleanupTask() { return oldComponentsCleanupTask; }
    }
}
Should this return a 200 response with an empty user list instead of a 404 error, so the response shape is uniform for callers?
/**
 * Returns metadata (user plus roles) for the user with the given email.
 * FIX: an unknown email now yields a 200 response with an empty "users" array instead of a
 * 404 error, so callers always get the same response shape.
 *
 * @param email the email address identifying the user
 * @return a slime JSON response with a "users" array containing zero or one entry
 */
private HttpResponse userMetadataFromUserId(String email) {
    var maybeUser = users.findUser(email);
    // Build the response skeleton unconditionally; the array stays empty for unknown users.
    var slime = new Slime();
    var root = slime.setObject();
    var usersRoot = root.setArray("users");
    if (maybeUser.isPresent()) {
        var user = maybeUser.get();
        var roles = users.listRoles(new UserId(user.email()));
        renderUserMetaData(usersRoot.addObject(), user, Set.copyOf(roles));
    }
    return new SlimeJsonResponse(slime);
}
return ErrorResponse.notFoundError("Could not find user: " + email);
/**
 * Returns metadata (user plus roles) for the user with the given email. An unknown email
 * yields a response whose "users" array is empty, so the response shape is uniform.
 *
 * @param email the email address identifying the user
 * @return a slime JSON response with a "users" array containing zero or one entry
 */
private HttpResponse userMetadataFromUserId(String email) {
    var maybeUser = users.findUser(email);
    var slime = new Slime();
    // The skeleton is always present; the array only gets an entry for a known user.
    var usersArray = slime.setObject().setArray("users");
    maybeUser.ifPresent(user -> {
        var roles = users.listRoles(new UserId(user.email()));
        renderUserMetaData(usersArray.addObject(), user, Set.copyOf(roles));
    });
    return new SlimeJsonResponse(slime);
}
class UserApiHandler extends ThreadedHttpRequestHandler { private final static Logger log = Logger.getLogger(UserApiHandler.class.getName()); private final UserManagement users; private final Controller controller; private final FlagsDb flagsDb; private final IntFlag maxTrialTenants; @Inject public UserApiHandler(Context parentCtx, UserManagement users, Controller controller, FlagSource flagSource, FlagsDb flagsDb) { super(parentCtx); this.users = users; this.controller = controller; this.flagsDb = flagsDb; this.maxTrialTenants = PermanentFlags.MAX_TRIAL_TENANTS.bindTo(flagSource); } @Override public HttpResponse handle(HttpRequest request) { try { Path path = new Path(request.getUri()); switch (request.getMethod()) { case GET: return handleGET(path, request); case POST: return handlePOST(path, request); case DELETE: return handleDELETE(path, request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (RuntimeException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/user/v1/user")) return userMetadata(request); if (path.matches("/user/v1/find")) return findUser(request); if (path.matches("/user/v1/tenant/{tenant}")) return listTenantRoleMembers(path.get("tenant")); if (path.matches("/user/v1/tenant/{tenant}/application/{application}")) return listApplicationRoleMembers(path.get("tenant"), path.get("application")); return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(), request.getUri().getPath())); } private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/user/v1/tenant/{tenant}")) return addTenantRoleMember(path.get("tenant"), request); return ErrorResponse.notFoundError(Text.format("No '%s' handler 
at '%s'", request.getMethod(), request.getUri().getPath())); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/user/v1/tenant/{tenant}")) return removeTenantRoleMember(path.get("tenant"), request); return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(), request.getUri().getPath())); } private HttpResponse handleOPTIONS() { EmptyResponse response = new EmptyResponse(); response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; } private static final Set<RoleDefinition> hostedOperators = Set.of( RoleDefinition.hostedOperator, RoleDefinition.hostedSupporter, RoleDefinition.hostedAccountant); private HttpResponse findUser(HttpRequest request) { var email = request.getProperty("email"); var query = request.getProperty("query"); if (email != null) return userMetadataFromUserId(email); if (query != null) return userMetadataQuery(query); return ErrorResponse.badRequest("Need 'email' or 'query' parameter"); } private HttpResponse userMetadataQuery(String query) { var userList = users.findUsers(query); var slime = new Slime(); var root = slime.setObject(); var userSlime = root.setArray("users"); for (var user : userList) { var roles = users.listRoles(new UserId((user.email()))); renderUserMetaData(userSlime.addObject(), user, Set.copyOf(roles)); } return new SlimeJsonResponse(slime); } private HttpResponse userMetadata(HttpRequest request) { User user; if (request.getJDiscRequest().context().get(User.ATTRIBUTE_NAME) instanceof User) { user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); } else { @SuppressWarnings("unchecked") Map<String, String> attr = (Map<String, String>) getAttribute(request, User.ATTRIBUTE_NAME, Map.class); user = new User(attr.get("email"), attr.get("name"), attr.get("nickname"), attr.get("picture")); } Set<Role> roles = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class).roles(); var slime = new Slime(); 
renderUserMetaData(slime.setObject(), user, roles); return new SlimeJsonResponse(slime); } private void renderUserMetaData(Cursor root, User user, Set<Role> roles) { Map<TenantName, List<TenantRole>> tenantRolesByTenantName = roles.stream() .flatMap(role -> filterTenantRoles(role).stream()) .distinct() .sorted(Comparator.comparing(Role::definition).reversed()) .collect(Collectors.groupingBy(TenantRole::tenant, Collectors.toList())); List<Role> operatorRoles = roles.stream() .filter(role -> hostedOperators.contains(role.definition())) .sorted(Comparator.comparing(Role::definition)) .toList(); root.setBool("isPublic", controller.system().isPublic()); root.setBool("isCd", controller.system().isCd()); root.setBool("hasTrialCapacity", hasTrialCapacity()); toSlime(root.setObject("user"), user); Cursor tenants = root.setObject("tenants"); tenantRolesByTenantName.keySet().stream() .sorted() .forEach(tenant -> { Cursor tenantObject = tenants.setObject(tenant.value()); tenantObject.setBool("supported", hasSupportedPlan(tenant)); Cursor tenantRolesObject = tenantObject.setArray("roles"); tenantRolesByTenantName.getOrDefault(tenant, List.of()) .forEach(role -> tenantRolesObject.addString(role.definition().name())); }); if (!operatorRoles.isEmpty()) { Cursor operator = root.setArray("operator"); operatorRoles.forEach(role -> operator.addString(role.definition().name())); } UserFlagsSerializer.toSlime(root, flagsDb.getAllFlagData(), tenantRolesByTenantName.keySet(), !operatorRoles.isEmpty(), user.email()); } private HttpResponse listTenantRoleMembers(String tenantName) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("tenant", tenantName); fillRoles(root, Roles.tenantRoles(TenantName.from(tenantName)), Collections.emptyList()); return new SlimeJsonResponse(slime); } private HttpResponse listApplicationRoleMembers(String tenantName, String applicationName) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("tenant", 
tenantName); root.setString("application", applicationName); fillRoles(root, Roles.applicationRoles(TenantName.from(tenantName), ApplicationName.from(applicationName)), Roles.tenantRoles(TenantName.from(tenantName))); return new SlimeJsonResponse(slime); } private void fillRoles(Cursor root, List<? extends Role> roles, List<? extends Role> superRoles) { Cursor rolesArray = root.setArray("roleNames"); for (Role role : roles) rolesArray.addString(valueOf(role)); Map<User, List<Role>> memberships = new LinkedHashMap<>(); List<Role> allRoles = new ArrayList<>(superRoles); allRoles.addAll(roles); for (Role role : allRoles) for (User user : users.listUsers(role)) { memberships.putIfAbsent(user, new ArrayList<>()); memberships.get(user).add(role); } Cursor usersArray = root.setArray("users"); memberships.forEach((user, userRoles) -> { Cursor userObject = usersArray.addObject(); toSlime(userObject, user); Cursor rolesObject = userObject.setObject("roles"); for (Role role : roles) { Cursor roleObject = rolesObject.setObject(valueOf(role)); roleObject.setBool("explicit", userRoles.contains(role)); roleObject.setBool("implied", userRoles.stream().anyMatch(userRole -> userRole.implies(role))); } }); } private static void toSlime(Cursor userObject, User user) { if (user.name() != null) userObject.setString("name", user.name()); userObject.setString("email", user.email()); if (user.nickname() != null) userObject.setString("nickname", user.nickname()); if (user.picture() != null) userObject.setString("picture", user.picture()); userObject.setBool("verified", user.isVerified()); if (!user.lastLogin().equals(User.NO_DATE)) userObject.setString("lastLogin", user.lastLogin().format(DateTimeFormatter.ISO_DATE)); if (user.loginCount() > -1) userObject.setLong("loginCount", user.loginCount()); } private HttpResponse addTenantRoleMember(String tenantName, HttpRequest request) { Inspector requestObject = bodyInspector(request); var tenant = TenantName.from(tenantName); var user = new 
UserId(require("user", Inspector::asString, requestObject)); var roles = SlimeStream.fromArray(requestObject.field("roles"), Inspector::asString) .map(roleName -> Roles.toRole(tenant, roleName)) .toList(); users.addToRoles(user, roles); return new MessageResponse(user + " is now a member of " + roles.stream().map(Role::toString).collect(Collectors.joining(", "))); } private HttpResponse removeTenantRoleMember(String tenantName, HttpRequest request) { Inspector requestObject = bodyInspector(request); var tenant = TenantName.from(tenantName); var user = new UserId(require("user", Inspector::asString, requestObject)); var roles = SlimeStream.fromArray(requestObject.field("roles"), Inspector::asString) .map(roleName -> Roles.toRole(tenant, roleName)) .toList(); enforceLastAdminOfTenant(tenant, user, roles); removeDeveloperKey(tenant, user, roles); users.removeFromRoles(user, roles); controller.tenants().lockIfPresent(tenant, LockedTenant.class, lockedTenant -> { if (lockedTenant instanceof LockedTenant.Cloud cloudTenant) controller.tenants().store(cloudTenant.withInvalidateUserSessionsBefore(controller.clock().instant())); }); return new MessageResponse(user + " is no longer a member of " + roles.stream().map(Role::toString).collect(Collectors.joining(", "))); } private void enforceLastAdminOfTenant(TenantName tenantName, UserId user, List<Role> roles) { for (Role role : roles) { if (role.definition().equals(RoleDefinition.administrator)) { if (Set.of(user.value()).equals(users.listUsers(role).stream().map(User::email).collect(Collectors.toSet()))) { throw new IllegalArgumentException("Can't remove the last administrator of a tenant."); } break; } } } private void removeDeveloperKey(TenantName tenantName, UserId user, List<Role> roles) { for (Role role : roles) { if (role.definition().equals(RoleDefinition.developer)) { controller.tenants().lockIfPresent(tenantName, LockedTenant.Cloud.class, tenant -> { PublicKey key = tenant.get().developerKeys().inverse().get(new 
SimplePrincipal(user.value())); if (key != null) controller.tenants().store(tenant.withoutDeveloperKey(key)); }); break; } } } private boolean hasTrialCapacity() { if (! controller.system().isPublic()) return true; var existing = controller.tenants().asList().stream().map(Tenant::name).collect(Collectors.toList()); var trialTenants = controller.serviceRegistry().billingController().tenantsWithPlan(existing, PlanId.from("trial")); return maxTrialTenants.value() < 0 || trialTenants.size() < maxTrialTenants.value(); } private static Inspector bodyInspector(HttpRequest request) { return Exceptions.uncheck(() -> SlimeUtils.jsonToSlime(IOUtils.readBytes(request.getData(), 1 << 10)).get()); } private static <Type> Type require(String name, Function<Inspector, Type> mapper, Inspector object) { if ( ! object.field(name).valid()) throw new IllegalArgumentException("Missing field '" + name + "'."); return mapper.apply(object.field(name)); } private static String valueOf(Role role) { switch (role.definition()) { case administrator: return "administrator"; case developer: return "developer"; case reader: return "reader"; case headless: return "headless"; default: throw new IllegalArgumentException("Unexpected role type '" + role.definition() + "'."); } } private static Collection<TenantRole> filterTenantRoles(Role role) { if (role instanceof TenantRole tenantRole) { switch (tenantRole.definition()) { case administrator, developer, reader, hostedDeveloper: return Set.of(tenantRole); case athenzTenantAdmin: return Roles.tenantRoles(tenantRole.tenant()); } } return Set.of(); } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> clazz) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(clazz::isInstance) .map(clazz::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } private boolean hasSupportedPlan(TenantName tenantName) { var planId = 
controller.serviceRegistry().billingController().getPlan(tenantName); return controller.serviceRegistry().planRegistry().plan(planId) .map(Plan::isSupported) .orElse(false); } }
class UserApiHandler extends ThreadedHttpRequestHandler { private final static Logger log = Logger.getLogger(UserApiHandler.class.getName()); private final UserManagement users; private final Controller controller; private final FlagsDb flagsDb; private final IntFlag maxTrialTenants; @Inject public UserApiHandler(Context parentCtx, UserManagement users, Controller controller, FlagSource flagSource, FlagsDb flagsDb) { super(parentCtx); this.users = users; this.controller = controller; this.flagsDb = flagsDb; this.maxTrialTenants = PermanentFlags.MAX_TRIAL_TENANTS.bindTo(flagSource); } @Override public HttpResponse handle(HttpRequest request) { try { Path path = new Path(request.getUri()); switch (request.getMethod()) { case GET: return handleGET(path, request); case POST: return handlePOST(path, request); case DELETE: return handleDELETE(path, request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (RuntimeException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/user/v1/user")) return userMetadata(request); if (path.matches("/user/v1/find")) return findUser(request); if (path.matches("/user/v1/tenant/{tenant}")) return listTenantRoleMembers(path.get("tenant")); if (path.matches("/user/v1/tenant/{tenant}/application/{application}")) return listApplicationRoleMembers(path.get("tenant"), path.get("application")); return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(), request.getUri().getPath())); } private HttpResponse handlePOST(Path path, HttpRequest request) { if (path.matches("/user/v1/tenant/{tenant}")) return addTenantRoleMember(path.get("tenant"), request); return ErrorResponse.notFoundError(Text.format("No '%s' handler 
at '%s'", request.getMethod(), request.getUri().getPath())); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/user/v1/tenant/{tenant}")) return removeTenantRoleMember(path.get("tenant"), request); return ErrorResponse.notFoundError(Text.format("No '%s' handler at '%s'", request.getMethod(), request.getUri().getPath())); } private HttpResponse handleOPTIONS() { EmptyResponse response = new EmptyResponse(); response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; } private static final Set<RoleDefinition> hostedOperators = Set.of( RoleDefinition.hostedOperator, RoleDefinition.hostedSupporter, RoleDefinition.hostedAccountant); private HttpResponse findUser(HttpRequest request) { var email = request.getProperty("email"); var query = request.getProperty("query"); if (email != null) return userMetadataFromUserId(email); if (query != null) return userMetadataQuery(query); return ErrorResponse.badRequest("Need 'email' or 'query' parameter"); } private HttpResponse userMetadataQuery(String query) { var userList = users.findUsers(query); var slime = new Slime(); var root = slime.setObject(); var userSlime = root.setArray("users"); for (var user : userList) { var roles = users.listRoles(new UserId((user.email()))); renderUserMetaData(userSlime.addObject(), user, Set.copyOf(roles)); } return new SlimeJsonResponse(slime); } private HttpResponse userMetadata(HttpRequest request) { User user; if (request.getJDiscRequest().context().get(User.ATTRIBUTE_NAME) instanceof User) { user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); } else { @SuppressWarnings("unchecked") Map<String, String> attr = (Map<String, String>) getAttribute(request, User.ATTRIBUTE_NAME, Map.class); user = new User(attr.get("email"), attr.get("name"), attr.get("nickname"), attr.get("picture")); } Set<Role> roles = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class).roles(); var slime = new Slime(); 
renderUserMetaData(slime.setObject(), user, roles); return new SlimeJsonResponse(slime); } private void renderUserMetaData(Cursor root, User user, Set<Role> roles) { Map<TenantName, List<TenantRole>> tenantRolesByTenantName = roles.stream() .flatMap(role -> filterTenantRoles(role).stream()) .distinct() .sorted(Comparator.comparing(Role::definition).reversed()) .collect(Collectors.groupingBy(TenantRole::tenant, Collectors.toList())); List<Role> operatorRoles = roles.stream() .filter(role -> hostedOperators.contains(role.definition())) .sorted(Comparator.comparing(Role::definition)) .toList(); root.setBool("isPublic", controller.system().isPublic()); root.setBool("isCd", controller.system().isCd()); root.setBool("hasTrialCapacity", hasTrialCapacity()); toSlime(root.setObject("user"), user); Cursor tenants = root.setObject("tenants"); tenantRolesByTenantName.keySet().stream() .sorted() .forEach(tenant -> { Cursor tenantObject = tenants.setObject(tenant.value()); tenantObject.setBool("supported", hasSupportedPlan(tenant)); Cursor tenantRolesObject = tenantObject.setArray("roles"); tenantRolesByTenantName.getOrDefault(tenant, List.of()) .forEach(role -> tenantRolesObject.addString(role.definition().name())); }); if (!operatorRoles.isEmpty()) { Cursor operator = root.setArray("operator"); operatorRoles.forEach(role -> operator.addString(role.definition().name())); } UserFlagsSerializer.toSlime(root, flagsDb.getAllFlagData(), tenantRolesByTenantName.keySet(), !operatorRoles.isEmpty(), user.email()); } private HttpResponse listTenantRoleMembers(String tenantName) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("tenant", tenantName); fillRoles(root, Roles.tenantRoles(TenantName.from(tenantName)), Collections.emptyList()); return new SlimeJsonResponse(slime); } private HttpResponse listApplicationRoleMembers(String tenantName, String applicationName) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("tenant", 
tenantName); root.setString("application", applicationName); fillRoles(root, Roles.applicationRoles(TenantName.from(tenantName), ApplicationName.from(applicationName)), Roles.tenantRoles(TenantName.from(tenantName))); return new SlimeJsonResponse(slime); } private void fillRoles(Cursor root, List<? extends Role> roles, List<? extends Role> superRoles) { Cursor rolesArray = root.setArray("roleNames"); for (Role role : roles) rolesArray.addString(valueOf(role)); Map<User, List<Role>> memberships = new LinkedHashMap<>(); List<Role> allRoles = new ArrayList<>(superRoles); allRoles.addAll(roles); for (Role role : allRoles) for (User user : users.listUsers(role)) { memberships.putIfAbsent(user, new ArrayList<>()); memberships.get(user).add(role); } Cursor usersArray = root.setArray("users"); memberships.forEach((user, userRoles) -> { Cursor userObject = usersArray.addObject(); toSlime(userObject, user); Cursor rolesObject = userObject.setObject("roles"); for (Role role : roles) { Cursor roleObject = rolesObject.setObject(valueOf(role)); roleObject.setBool("explicit", userRoles.contains(role)); roleObject.setBool("implied", userRoles.stream().anyMatch(userRole -> userRole.implies(role))); } }); } private static void toSlime(Cursor userObject, User user) { if (user.name() != null) userObject.setString("name", user.name()); userObject.setString("email", user.email()); if (user.nickname() != null) userObject.setString("nickname", user.nickname()); if (user.picture() != null) userObject.setString("picture", user.picture()); userObject.setBool("verified", user.isVerified()); if (!user.lastLogin().equals(User.NO_DATE)) userObject.setString("lastLogin", user.lastLogin().format(DateTimeFormatter.ISO_DATE)); if (user.loginCount() > -1) userObject.setLong("loginCount", user.loginCount()); } private HttpResponse addTenantRoleMember(String tenantName, HttpRequest request) { Inspector requestObject = bodyInspector(request); var tenant = TenantName.from(tenantName); var user = new 
UserId(require("user", Inspector::asString, requestObject)); var roles = SlimeStream.fromArray(requestObject.field("roles"), Inspector::asString) .map(roleName -> Roles.toRole(tenant, roleName)) .toList(); users.addToRoles(user, roles); return new MessageResponse(user + " is now a member of " + roles.stream().map(Role::toString).collect(Collectors.joining(", "))); } private HttpResponse removeTenantRoleMember(String tenantName, HttpRequest request) { Inspector requestObject = bodyInspector(request); var tenant = TenantName.from(tenantName); var user = new UserId(require("user", Inspector::asString, requestObject)); var roles = SlimeStream.fromArray(requestObject.field("roles"), Inspector::asString) .map(roleName -> Roles.toRole(tenant, roleName)) .toList(); enforceLastAdminOfTenant(tenant, user, roles); removeDeveloperKey(tenant, user, roles); users.removeFromRoles(user, roles); controller.tenants().lockIfPresent(tenant, LockedTenant.class, lockedTenant -> { if (lockedTenant instanceof LockedTenant.Cloud cloudTenant) controller.tenants().store(cloudTenant.withInvalidateUserSessionsBefore(controller.clock().instant())); }); return new MessageResponse(user + " is no longer a member of " + roles.stream().map(Role::toString).collect(Collectors.joining(", "))); } private void enforceLastAdminOfTenant(TenantName tenantName, UserId user, List<Role> roles) { for (Role role : roles) { if (role.definition().equals(RoleDefinition.administrator)) { if (Set.of(user.value()).equals(users.listUsers(role).stream().map(User::email).collect(Collectors.toSet()))) { throw new IllegalArgumentException("Can't remove the last administrator of a tenant."); } break; } } } private void removeDeveloperKey(TenantName tenantName, UserId user, List<Role> roles) { for (Role role : roles) { if (role.definition().equals(RoleDefinition.developer)) { controller.tenants().lockIfPresent(tenantName, LockedTenant.Cloud.class, tenant -> { PublicKey key = tenant.get().developerKeys().inverse().get(new 
SimplePrincipal(user.value())); if (key != null) controller.tenants().store(tenant.withoutDeveloperKey(key)); }); break; } } } private boolean hasTrialCapacity() { if (! controller.system().isPublic()) return true; var existing = controller.tenants().asList().stream().map(Tenant::name).collect(Collectors.toList()); var trialTenants = controller.serviceRegistry().billingController().tenantsWithPlan(existing, PlanId.from("trial")); return maxTrialTenants.value() < 0 || trialTenants.size() < maxTrialTenants.value(); } private static Inspector bodyInspector(HttpRequest request) { return Exceptions.uncheck(() -> SlimeUtils.jsonToSlime(IOUtils.readBytes(request.getData(), 1 << 10)).get()); } private static <Type> Type require(String name, Function<Inspector, Type> mapper, Inspector object) { if ( ! object.field(name).valid()) throw new IllegalArgumentException("Missing field '" + name + "'."); return mapper.apply(object.field(name)); } private static String valueOf(Role role) { switch (role.definition()) { case administrator: return "administrator"; case developer: return "developer"; case reader: return "reader"; case headless: return "headless"; default: throw new IllegalArgumentException("Unexpected role type '" + role.definition() + "'."); } } private static Collection<TenantRole> filterTenantRoles(Role role) { if (role instanceof TenantRole tenantRole) { switch (tenantRole.definition()) { case administrator, developer, reader, hostedDeveloper: return Set.of(tenantRole); case athenzTenantAdmin: return Roles.tenantRoles(tenantRole.tenant()); } } return Set.of(); } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> clazz) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(clazz::isInstance) .map(clazz::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } private boolean hasSupportedPlan(TenantName tenantName) { var planId = 
controller.serviceRegistry().billingController().getPlan(tenantName); return controller.serviceRegistry().planRegistry().plan(planId) .map(Plan::isSupported) .orElse(false); } }
When graph creation itself fails (before any components of the new generation are constructed), the failed-graph deconstruction path could be reused by passing the current graph as both the current and the failed graph argument, so that the bundles installed for the failed generation are still deconstructed.
/**
 * Waits for the next config generation, creates and constructs the new component
 * graph, and returns it together with a task that deconstructs the previous
 * generation's components and obsolete bundles.
 * <p>
 * On any failure the failed bundle generation is rolled back, its newly installed
 * bundles are scheduled for deconstruction, and the current generation is
 * invalidated before the throwable is rethrown.
 *
 * @param oldGraph         the currently active component graph
 * @param fallbackInjector injector used for components not covered by the graph
 * @param isInitializing   whether this is the initial graph setup
 * @return the new graph plus a cleanup task for the previous generation
 */
public ComponentGraphResult waitForNextGraphGeneration(ComponentGraph oldGraph, Injector fallbackInjector, boolean isInitializing) {
    try {
        ComponentGraph newGraph;
        Collection<Bundle> obsoleteBundles = new HashSet<>();
        try {
            newGraph = waitForNewConfigGenAndCreateGraph(oldGraph, fallbackInjector, isInitializing, obsoleteBundles);
            newGraph.reuseNodes(oldGraph);
        } catch (Throwable t) {
            log.warning("Failed to set up component graph - uninstalling latest bundles. Bootstrap generation: " + getBootstrapGeneration());
            // Fix: the bundles installed for the failed generation were previously
            // discarded here, so they were never deconstructed. Capture them and
            // schedule deconstruction, mirroring the construction-failure path below.
            Collection<Bundle> newBundlesFromFailedGen = osgi.completeBundleGeneration(Osgi.GenerationStatus.FAILURE);
            deconstructComponentsAndBundles(getBootstrapGeneration(), newBundlesFromFailedGen, List.of());
            throw t;
        }
        try {
            constructComponents(newGraph);
        } catch (Throwable e) {
            log.warning("Failed to construct components for generation '" + newGraph.generation() + "' - scheduling partial graph for deconstruction");
            // Partially constructed components plus the failed generation's new
            // bundles must both be torn down.
            Collection<Bundle> newBundlesFromFailedGen = osgi.completeBundleGeneration(Osgi.GenerationStatus.FAILURE);
            deconstructFailedGraph(oldGraph, newGraph, newBundlesFromFailedGen);
            throw e;
        }
        // Both phases succeeded: commit the bundle generation and hand back a
        // task that tears down the previous generation when it is safe to do so.
        osgi.completeBundleGeneration(Osgi.GenerationStatus.SUCCESS);
        Runnable cleanupTask = createPreviousGraphDeconstructionTask(oldGraph, newGraph, obsoleteBundles);
        return new ComponentGraphResult(newGraph, cleanupTask);
    } catch (Throwable t) {
        invalidateGeneration(oldGraph.generation(), t);
        throw t;
    }
}
}
/**
 * Blocks until the next config generation arrives, then builds and constructs
 * the component graph for it.
 * <p>
 * Returns the constructed graph together with a cleanup task that deconstructs
 * the previous generation's components and obsolete bundles. Every failure path
 * rolls back the failed bundle generation, schedules the affected components and
 * bundles for deconstruction, invalidates the generation, and rethrows.
 *
 * @param oldGraph         the graph of the currently active generation
 * @param fallbackInjector injector providing dependencies outside the graph
 * @param isInitializing   whether this is the first graph being set up
 * @return the new graph and the previous generation's cleanup task
 */
public ComponentGraphResult waitForNextGraphGeneration(ComponentGraph oldGraph, Injector fallbackInjector, boolean isInitializing) {
    try {
        ComponentGraph graphForNewGen;
        Collection<Bundle> bundlesToRetire = new HashSet<>();

        // Phase 1: wait for config and build the (not yet constructed) graph.
        try {
            graphForNewGen = waitForNewConfigGenAndCreateGraph(oldGraph, fallbackInjector, isInitializing, bundlesToRetire);
            graphForNewGen.reuseNodes(oldGraph);
        } catch (Throwable t) {
            log.warning("Failed to set up component graph - uninstalling latest bundles. Bootstrap generation: " + getBootstrapGeneration());
            // Roll back and deconstruct the bundles installed for the failed generation.
            Collection<Bundle> failedGenerationBundles = osgi.completeBundleGeneration(Osgi.GenerationStatus.FAILURE);
            deconstructComponentsAndBundles(getBootstrapGeneration(), failedGenerationBundles, List.of());
            throw t;
        }

        // Phase 2: instantiate every component of the new graph.
        try {
            constructComponents(graphForNewGen);
        } catch (Throwable e) {
            log.warning("Failed to construct components for generation '" + graphForNewGen.generation() + "' - scheduling partial graph for deconstruction");
            // Tear down the partially constructed graph and the failed generation's bundles.
            Collection<Bundle> failedGenerationBundles = osgi.completeBundleGeneration(Osgi.GenerationStatus.FAILURE);
            deconstructFailedGraph(oldGraph, graphForNewGen, failedGenerationBundles);
            throw e;
        }

        // Success: commit the bundle generation and hand back the old generation's cleanup.
        osgi.completeBundleGeneration(Osgi.GenerationStatus.SUCCESS);
        Runnable oldGenerationCleanup = createPreviousGraphDeconstructionTask(oldGraph, graphForNewGen, bundlesToRetire);
        return new ComponentGraphResult(graphForNewGen, oldGenerationCleanup);
    } catch (Throwable t) {
        invalidateGeneration(oldGraph.generation(), t);
        throw t;
    }
}
class Container { private static final Logger log = Logger.getLogger(Container.class.getName()); private final SubscriberFactory subscriberFactory; private final ConfigKey<ApplicationBundlesConfig> applicationBundlesConfigKey; private final ConfigKey<PlatformBundlesConfig> platformBundlesConfigKey; private final ConfigKey<ComponentsConfig> componentsConfigKey; private final ComponentDeconstructor destructor; private final Osgi osgi; private final ConfigRetriever retriever; private List<String> platformBundles; private long previousConfigGeneration = -1L; private long leastGeneration = -1L; public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor destructor, Osgi osgi) { this.subscriberFactory = subscriberFactory; this.destructor = destructor; this.osgi = osgi; applicationBundlesConfigKey = new ConfigKey<>(ApplicationBundlesConfig.class, configId); platformBundlesConfigKey = new ConfigKey<>(PlatformBundlesConfig.class, configId); componentsConfigKey = new ConfigKey<>(ComponentsConfig.class, configId); var bootstrapKeys = Set.of(applicationBundlesConfigKey, platformBundlesConfigKey, componentsConfigKey); this.retriever = new ConfigRetriever(bootstrapKeys, subscriberFactory); } private void constructComponents(ComponentGraph graph) { graph.nodes().forEach(n -> { if (Thread.interrupted()) throw new UncheckedInterruptedException("Interrupted while constructing component graph", true); n.constructInstance(); }); } private ComponentGraph waitForNewConfigGenAndCreateGraph( ComponentGraph graph, Injector fallbackInjector, boolean isInitializing, Collection<Bundle> obsoleteBundles) { ConfigSnapshot snapshot; while (true) { snapshot = retriever.getConfigs(graph.configKeys(), leastGeneration, isInitializing); if (log.isLoggable(FINE)) log.log(FINE, String.format("getConfigAndCreateGraph:\n" + "graph.configKeys = %s\n" + "graph.generation = %s\n" + "snapshot = %s\n", graph.configKeys(), graph.generation(), snapshot)); if (snapshot instanceof 
BootstrapConfigs) { if (getBootstrapGeneration() <= previousConfigGeneration) { throw new IllegalStateException(String.format( "Got bootstrap configs out of sequence for old config generation %d.\n" + "Previous config generation is %d", getBootstrapGeneration(), previousConfigGeneration)); } log.log(FINE, () -> "Got new bootstrap generation\n" + configGenerationsString()); if (graph.generation() == 0) { platformBundles = getConfig(platformBundlesConfigKey, snapshot.configs()).bundlePaths(); osgi.installPlatformBundles(platformBundles); } else { throwIfPlatformBundlesChanged(snapshot); } Collection<Bundle> bundlesToRemove = installApplicationBundles(snapshot.configs()); obsoleteBundles.addAll(bundlesToRemove); graph = createComponentGraph(snapshot.configs(), getBootstrapGeneration(), fallbackInjector); } else if (snapshot instanceof ComponentsConfigs) { break; } } log.log(FINE, () -> "Got components configs,\n" + configGenerationsString()); return createAndConfigureComponentGraph(snapshot.configs(), fallbackInjector); } private long getBootstrapGeneration() { return retriever.getBootstrapGeneration(); } private long getComponentsGeneration() { return retriever.getComponentsGeneration(); } private String configGenerationsString() { return String.format("bootstrap generation = %d\n" + "components generation: %d\n" + "previous generation: %d", getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration); } private void throwIfPlatformBundlesChanged(ConfigSnapshot snapshot) { var checkPlatformBundles = getConfig(platformBundlesConfigKey, snapshot.configs()).bundlePaths(); if (! checkPlatformBundles.equals(platformBundles)) throw new RuntimeException("Platform bundles are not allowed to change!\nOld: " + platformBundles + "\nNew: " + checkPlatformBundles); } private ComponentGraph createAndConfigureComponentGraph(Map<ConfigKey<? 
extends ConfigInstance>, ConfigInstance> componentsConfigs, Injector fallbackInjector) { ComponentGraph componentGraph = createComponentGraph(componentsConfigs, getComponentsGeneration(), fallbackInjector); componentGraph.setAvailableConfigs(componentsConfigs); return componentGraph; } private void deconstructFailedGraph(ComponentGraph currentGraph, ComponentGraph failedGraph, Collection<Bundle> bundlesFromFailedGraph) { Set<Object> currentComponents = Collections.newSetFromMap(new IdentityHashMap<>(currentGraph.size())); currentComponents.addAll(currentGraph.allConstructedComponentsAndProviders()); List<Object> unusedComponents = new ArrayList<>(); for (Object component : failedGraph.allConstructedComponentsAndProviders()) { if (!currentComponents.contains(component)) unusedComponents.add(component); } destructor.deconstruct(failedGraph.generation(), unusedComponents, bundlesFromFailedGraph); } private Runnable createPreviousGraphDeconstructionTask(ComponentGraph oldGraph, ComponentGraph newGraph, Collection<Bundle> obsoleteBundles) { Map<Object, ?> newComponents = new IdentityHashMap<>(newGraph.size()); for (Object component : newGraph.allConstructedComponentsAndProviders()) newComponents.put(component, null); List<Object> obsoleteComponents = new ArrayList<>(); for (Object component : oldGraph.allConstructedComponentsAndProviders()) if ( ! newComponents.containsKey(component)) obsoleteComponents.add(component); return () -> destructor.deconstruct(oldGraph.generation(), obsoleteComponents, obsoleteBundles); } private Set<Bundle> installApplicationBundles(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs) { ApplicationBundlesConfig applicationBundlesConfig = getConfig(applicationBundlesConfigKey, configsIncludingBootstrapConfigs); return osgi.useApplicationBundles(applicationBundlesConfig.bundles(), getBootstrapGeneration()); } private ComponentGraph createComponentGraph(Map<ConfigKey<? 
extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs, long generation, Injector fallbackInjector) { previousConfigGeneration = generation; ComponentGraph graph = new ComponentGraph(generation); ComponentsConfig componentsConfig = getConfig(componentsConfigKey, configsIncludingBootstrapConfigs); if (componentsConfig == null) { throw new ConfigurationRuntimeException("The set of all configs does not include a valid 'components' config. Config set: " + configsIncludingBootstrapConfigs.keySet()); } addNodes(componentsConfig, graph); injectNodes(componentsConfig, graph); graph.complete(fallbackInjector); return graph; } private void addNodes(ComponentsConfig componentsConfig, ComponentGraph graph) { for (ComponentsConfig.Components config : componentsConfig.components()) { BundleInstantiationSpecification specification = bundleInstantiationSpecification(config); Class<?> componentClass = osgi.resolveClass(specification); Node componentNode = new ComponentNode(specification.id, config.configId(), componentClass, null); graph.add(componentNode); } } private void injectNodes(ComponentsConfig config, ComponentGraph graph) { for (ComponentsConfig.Components component : config.components()) { Node componentNode = ComponentGraph.getNode(graph, component.id()); for (ComponentsConfig.Components.Inject inject : component.inject()) { componentNode.inject(ComponentGraph.getNode(graph, inject.id())); } } } private void invalidateGeneration(long generation, Throwable cause) { leastGeneration = Math.max(retriever.getComponentsGeneration(), retriever.getBootstrapGeneration()) + 1; if (!(cause instanceof InterruptedException) && !(cause instanceof ConfigInterruptedException) && !(cause instanceof SubscriberClosedException)) { log.log(Level.WARNING, newGraphErrorMessage(generation, cause), cause); } } private static String newGraphErrorMessage(long generation, Throwable cause) { String failedFirstMessage = "Failed to set up first component graph"; String 
failedNewMessage = "Failed to set up new component graph"; String constructMessage = " due to error when constructing one of the components"; String retainMessage = ". Retaining previous component generation."; if (generation == 0) { if (cause instanceof ComponentNode.ComponentConstructorException) { return failedFirstMessage + constructMessage; } else { return failedFirstMessage; } } else { if (cause instanceof ComponentNode.ComponentConstructorException) { return failedNewMessage + constructMessage + retainMessage; } else { return failedNewMessage + retainMessage; } } } public void shutdown(ComponentGraph graph) { shutdownConfigRetriever(); if (graph != null) { scheduleGraphForDeconstruction(graph); destructor.shutdown(); } } public void shutdownConfigRetriever() { retriever.shutdown(); } public void reloadConfig(long generation) { subscriberFactory.reloadActiveSubscribers(generation); } private void scheduleGraphForDeconstruction(ComponentGraph graph) { destructor.deconstruct(graph.generation(), graph.allConstructedComponentsAndProviders(), List.of()); } public static <T extends ConfigInstance> T getConfig(ConfigKey<T> key, Map<ConfigKey<? 
extends ConfigInstance>, ConfigInstance> configs) { ConfigInstance inst = configs.get(key); if (inst == null || key.getConfigClass() == null) { throw new RuntimeException("Missing config " + key); } return key.getConfigClass().cast(inst); } private static BundleInstantiationSpecification bundleInstantiationSpecification(ComponentsConfig.Components config) { return BundleInstantiationSpecification.fromStrings(config.id(), config.classId(), config.bundle()); } public static class ComponentGraphResult { private final ComponentGraph newGraph; private final Runnable oldComponentsCleanupTask; public ComponentGraphResult(ComponentGraph newGraph, Runnable oldComponentsCleanupTask) { this.newGraph = newGraph; this.oldComponentsCleanupTask = oldComponentsCleanupTask; } public ComponentGraph newGraph() { return newGraph; } public Runnable oldComponentsCleanupTask() { return oldComponentsCleanupTask; } } }
class Container { private static final Logger log = Logger.getLogger(Container.class.getName()); private final SubscriberFactory subscriberFactory; private final ConfigKey<ApplicationBundlesConfig> applicationBundlesConfigKey; private final ConfigKey<PlatformBundlesConfig> platformBundlesConfigKey; private final ConfigKey<ComponentsConfig> componentsConfigKey; private final ComponentDeconstructor destructor; private final Osgi osgi; private final ConfigRetriever retriever; private List<String> platformBundles; private long previousConfigGeneration = -1L; private long leastGeneration = -1L; public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor destructor, Osgi osgi) { this.subscriberFactory = subscriberFactory; this.destructor = destructor; this.osgi = osgi; applicationBundlesConfigKey = new ConfigKey<>(ApplicationBundlesConfig.class, configId); platformBundlesConfigKey = new ConfigKey<>(PlatformBundlesConfig.class, configId); componentsConfigKey = new ConfigKey<>(ComponentsConfig.class, configId); var bootstrapKeys = Set.of(applicationBundlesConfigKey, platformBundlesConfigKey, componentsConfigKey); this.retriever = new ConfigRetriever(bootstrapKeys, subscriberFactory); } private void constructComponents(ComponentGraph graph) { graph.nodes().forEach(n -> { if (Thread.interrupted()) throw new UncheckedInterruptedException("Interrupted while constructing component graph", true); n.constructInstance(); }); } private ComponentGraph waitForNewConfigGenAndCreateGraph( ComponentGraph graph, Injector fallbackInjector, boolean isInitializing, Collection<Bundle> obsoleteBundles) { ConfigSnapshot snapshot; while (true) { snapshot = retriever.getConfigs(graph.configKeys(), leastGeneration, isInitializing); if (log.isLoggable(FINE)) log.log(FINE, String.format("getConfigAndCreateGraph:\n" + "graph.configKeys = %s\n" + "graph.generation = %s\n" + "snapshot = %s\n", graph.configKeys(), graph.generation(), snapshot)); if (snapshot instanceof 
BootstrapConfigs) { if (getBootstrapGeneration() <= previousConfigGeneration) { throw new IllegalStateException(String.format( "Got bootstrap configs out of sequence for old config generation %d.\n" + "Previous config generation is %d", getBootstrapGeneration(), previousConfigGeneration)); } log.log(FINE, () -> "Got new bootstrap generation\n" + configGenerationsString()); if (graph.generation() == 0) { platformBundles = getConfig(platformBundlesConfigKey, snapshot.configs()).bundlePaths(); osgi.installPlatformBundles(platformBundles); } else { throwIfPlatformBundlesChanged(snapshot); } Collection<Bundle> bundlesToRemove = installApplicationBundles(snapshot.configs()); obsoleteBundles.addAll(bundlesToRemove); graph = createComponentGraph(snapshot.configs(), getBootstrapGeneration(), fallbackInjector); } else if (snapshot instanceof ComponentsConfigs) { break; } } log.log(FINE, () -> "Got components configs,\n" + configGenerationsString()); return createAndConfigureComponentGraph(snapshot.configs(), fallbackInjector); } private long getBootstrapGeneration() { return retriever.getBootstrapGeneration(); } private long getComponentsGeneration() { return retriever.getComponentsGeneration(); } private String configGenerationsString() { return String.format("bootstrap generation = %d\n" + "components generation: %d\n" + "previous generation: %d", getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration); } private void throwIfPlatformBundlesChanged(ConfigSnapshot snapshot) { var checkPlatformBundles = getConfig(platformBundlesConfigKey, snapshot.configs()).bundlePaths(); if (! checkPlatformBundles.equals(platformBundles)) throw new RuntimeException("Platform bundles are not allowed to change!\nOld: " + platformBundles + "\nNew: " + checkPlatformBundles); } private ComponentGraph createAndConfigureComponentGraph(Map<ConfigKey<? 
extends ConfigInstance>, ConfigInstance> componentsConfigs, Injector fallbackInjector) { ComponentGraph componentGraph = createComponentGraph(componentsConfigs, getComponentsGeneration(), fallbackInjector); componentGraph.setAvailableConfigs(componentsConfigs); return componentGraph; } private void deconstructFailedGraph(ComponentGraph currentGraph, ComponentGraph failedGraph, Collection<Bundle> bundlesFromFailedGraph) { Set<Object> currentComponents = Collections.newSetFromMap(new IdentityHashMap<>(currentGraph.size())); currentComponents.addAll(currentGraph.allConstructedComponentsAndProviders()); List<Object> unusedComponents = new ArrayList<>(); for (Object component : failedGraph.allConstructedComponentsAndProviders()) { if (!currentComponents.contains(component)) unusedComponents.add(component); } deconstructComponentsAndBundles(failedGraph.generation(), bundlesFromFailedGraph, unusedComponents); } private void deconstructComponentsAndBundles(long generation, Collection<Bundle> bundlesFromFailedGraph, List<Object> unusedComponents) { destructor.deconstruct(generation, unusedComponents, bundlesFromFailedGraph); } private Runnable createPreviousGraphDeconstructionTask(ComponentGraph oldGraph, ComponentGraph newGraph, Collection<Bundle> obsoleteBundles) { Map<Object, ?> newComponents = new IdentityHashMap<>(newGraph.size()); for (Object component : newGraph.allConstructedComponentsAndProviders()) newComponents.put(component, null); List<Object> obsoleteComponents = new ArrayList<>(); for (Object component : oldGraph.allConstructedComponentsAndProviders()) if ( ! newComponents.containsKey(component)) obsoleteComponents.add(component); return () -> destructor.deconstruct(oldGraph.generation(), obsoleteComponents, obsoleteBundles); } private Set<Bundle> installApplicationBundles(Map<ConfigKey<? 
extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs) { ApplicationBundlesConfig applicationBundlesConfig = getConfig(applicationBundlesConfigKey, configsIncludingBootstrapConfigs); return osgi.useApplicationBundles(applicationBundlesConfig.bundles(), getBootstrapGeneration()); } private ComponentGraph createComponentGraph(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs, long generation, Injector fallbackInjector) { previousConfigGeneration = generation; ComponentGraph graph = new ComponentGraph(generation); ComponentsConfig componentsConfig = getConfig(componentsConfigKey, configsIncludingBootstrapConfigs); if (componentsConfig == null) { throw new ConfigurationRuntimeException("The set of all configs does not include a valid 'components' config. Config set: " + configsIncludingBootstrapConfigs.keySet()); } addNodes(componentsConfig, graph); injectNodes(componentsConfig, graph); graph.complete(fallbackInjector); return graph; } private void addNodes(ComponentsConfig componentsConfig, ComponentGraph graph) { for (ComponentsConfig.Components config : componentsConfig.components()) { BundleInstantiationSpecification specification = bundleInstantiationSpecification(config); Class<?> componentClass = osgi.resolveClass(specification); Node componentNode = new ComponentNode(specification.id, config.configId(), componentClass, null); graph.add(componentNode); } } private void injectNodes(ComponentsConfig config, ComponentGraph graph) { for (ComponentsConfig.Components component : config.components()) { Node componentNode = ComponentGraph.getNode(graph, component.id()); for (ComponentsConfig.Components.Inject inject : component.inject()) { componentNode.inject(ComponentGraph.getNode(graph, inject.id())); } } } private void invalidateGeneration(long generation, Throwable cause) { leastGeneration = Math.max(retriever.getComponentsGeneration(), retriever.getBootstrapGeneration()) + 1; if (!(cause instanceof 
InterruptedException) && !(cause instanceof ConfigInterruptedException) && !(cause instanceof SubscriberClosedException)) { log.log(Level.WARNING, newGraphErrorMessage(generation, cause), cause); } } private static String newGraphErrorMessage(long generation, Throwable cause) { String failedFirstMessage = "Failed to set up first component graph"; String failedNewMessage = "Failed to set up new component graph"; String constructMessage = " due to error when constructing one of the components"; String retainMessage = ". Retaining previous component generation."; if (generation == 0) { if (cause instanceof ComponentNode.ComponentConstructorException) { return failedFirstMessage + constructMessage; } else { return failedFirstMessage; } } else { if (cause instanceof ComponentNode.ComponentConstructorException) { return failedNewMessage + constructMessage + retainMessage; } else { return failedNewMessage + retainMessage; } } } public void shutdown(ComponentGraph graph) { shutdownConfigRetriever(); if (graph != null) { deconstructComponentsAndBundles(graph.generation(), List.of(), graph.allConstructedComponentsAndProviders()); destructor.shutdown(); } } public void shutdownConfigRetriever() { retriever.shutdown(); } public void reloadConfig(long generation) { subscriberFactory.reloadActiveSubscribers(generation); } public static <T extends ConfigInstance> T getConfig(ConfigKey<T> key, Map<ConfigKey<? 
extends ConfigInstance>, ConfigInstance> configs) { ConfigInstance inst = configs.get(key); if (inst == null || key.getConfigClass() == null) { throw new RuntimeException("Missing config " + key); } return key.getConfigClass().cast(inst); } private static BundleInstantiationSpecification bundleInstantiationSpecification(ComponentsConfig.Components config) { return BundleInstantiationSpecification.fromStrings(config.id(), config.classId(), config.bundle()); } public static class ComponentGraphResult { private final ComponentGraph newGraph; private final Runnable oldComponentsCleanupTask; public ComponentGraphResult(ComponentGraph newGraph, Runnable oldComponentsCleanupTask) { this.newGraph = newGraph; this.oldComponentsCleanupTask = oldComponentsCleanupTask; } public ComponentGraph newGraph() { return newGraph; } public Runnable oldComponentsCleanupTask() { return oldComponentsCleanupTask; } } }
Correct. I'll see if I can fix it easily, or add a TODO. They are not uninstalled today, and not in the previous attempted fix either.
public ComponentGraphResult waitForNextGraphGeneration(ComponentGraph oldGraph, Injector fallbackInjector, boolean isInitializing) { try { ComponentGraph newGraph; Collection<Bundle> obsoleteBundles = new HashSet<>(); try { newGraph = waitForNewConfigGenAndCreateGraph(oldGraph, fallbackInjector, isInitializing, obsoleteBundles); newGraph.reuseNodes(oldGraph); } catch (Throwable t) { log.warning("Failed to set up component graph - uninstalling latest bundles. Bootstrap generation: " + getBootstrapGeneration()); osgi.completeBundleGeneration(Osgi.GenerationStatus.FAILURE); throw t; } try { constructComponents(newGraph); } catch (Throwable e) { log.warning("Failed to construct components for generation '" + newGraph.generation() + "' - scheduling partial graph for deconstruction"); Collection<Bundle> newBundlesFromFailedGen = osgi.completeBundleGeneration(Osgi.GenerationStatus.FAILURE); deconstructFailedGraph(oldGraph, newGraph, newBundlesFromFailedGen); throw e; } osgi.completeBundleGeneration(Osgi.GenerationStatus.SUCCESS); Runnable cleanupTask = createPreviousGraphDeconstructionTask(oldGraph, newGraph, obsoleteBundles); return new ComponentGraphResult(newGraph, cleanupTask); } catch (Throwable t) { invalidateGeneration(oldGraph.generation(), t); throw t; } }
}
public ComponentGraphResult waitForNextGraphGeneration(ComponentGraph oldGraph, Injector fallbackInjector, boolean isInitializing) { try { ComponentGraph newGraph; Collection<Bundle> obsoleteBundles = new HashSet<>(); try { newGraph = waitForNewConfigGenAndCreateGraph(oldGraph, fallbackInjector, isInitializing, obsoleteBundles); newGraph.reuseNodes(oldGraph); } catch (Throwable t) { log.warning("Failed to set up component graph - uninstalling latest bundles. Bootstrap generation: " + getBootstrapGeneration()); Collection<Bundle> newBundlesFromFailedGen = osgi.completeBundleGeneration(Osgi.GenerationStatus.FAILURE); deconstructComponentsAndBundles(getBootstrapGeneration(), newBundlesFromFailedGen, List.of()); throw t; } try { constructComponents(newGraph); } catch (Throwable e) { log.warning("Failed to construct components for generation '" + newGraph.generation() + "' - scheduling partial graph for deconstruction"); Collection<Bundle> newBundlesFromFailedGen = osgi.completeBundleGeneration(Osgi.GenerationStatus.FAILURE); deconstructFailedGraph(oldGraph, newGraph, newBundlesFromFailedGen); throw e; } osgi.completeBundleGeneration(Osgi.GenerationStatus.SUCCESS); Runnable cleanupTask = createPreviousGraphDeconstructionTask(oldGraph, newGraph, obsoleteBundles); return new ComponentGraphResult(newGraph, cleanupTask); } catch (Throwable t) { invalidateGeneration(oldGraph.generation(), t); throw t; } }
class Container { private static final Logger log = Logger.getLogger(Container.class.getName()); private final SubscriberFactory subscriberFactory; private final ConfigKey<ApplicationBundlesConfig> applicationBundlesConfigKey; private final ConfigKey<PlatformBundlesConfig> platformBundlesConfigKey; private final ConfigKey<ComponentsConfig> componentsConfigKey; private final ComponentDeconstructor destructor; private final Osgi osgi; private final ConfigRetriever retriever; private List<String> platformBundles; private long previousConfigGeneration = -1L; private long leastGeneration = -1L; public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor destructor, Osgi osgi) { this.subscriberFactory = subscriberFactory; this.destructor = destructor; this.osgi = osgi; applicationBundlesConfigKey = new ConfigKey<>(ApplicationBundlesConfig.class, configId); platformBundlesConfigKey = new ConfigKey<>(PlatformBundlesConfig.class, configId); componentsConfigKey = new ConfigKey<>(ComponentsConfig.class, configId); var bootstrapKeys = Set.of(applicationBundlesConfigKey, platformBundlesConfigKey, componentsConfigKey); this.retriever = new ConfigRetriever(bootstrapKeys, subscriberFactory); } private void constructComponents(ComponentGraph graph) { graph.nodes().forEach(n -> { if (Thread.interrupted()) throw new UncheckedInterruptedException("Interrupted while constructing component graph", true); n.constructInstance(); }); } private ComponentGraph waitForNewConfigGenAndCreateGraph( ComponentGraph graph, Injector fallbackInjector, boolean isInitializing, Collection<Bundle> obsoleteBundles) { ConfigSnapshot snapshot; while (true) { snapshot = retriever.getConfigs(graph.configKeys(), leastGeneration, isInitializing); if (log.isLoggable(FINE)) log.log(FINE, String.format("getConfigAndCreateGraph:\n" + "graph.configKeys = %s\n" + "graph.generation = %s\n" + "snapshot = %s\n", graph.configKeys(), graph.generation(), snapshot)); if (snapshot instanceof 
BootstrapConfigs) { if (getBootstrapGeneration() <= previousConfigGeneration) { throw new IllegalStateException(String.format( "Got bootstrap configs out of sequence for old config generation %d.\n" + "Previous config generation is %d", getBootstrapGeneration(), previousConfigGeneration)); } log.log(FINE, () -> "Got new bootstrap generation\n" + configGenerationsString()); if (graph.generation() == 0) { platformBundles = getConfig(platformBundlesConfigKey, snapshot.configs()).bundlePaths(); osgi.installPlatformBundles(platformBundles); } else { throwIfPlatformBundlesChanged(snapshot); } Collection<Bundle> bundlesToRemove = installApplicationBundles(snapshot.configs()); obsoleteBundles.addAll(bundlesToRemove); graph = createComponentGraph(snapshot.configs(), getBootstrapGeneration(), fallbackInjector); } else if (snapshot instanceof ComponentsConfigs) { break; } } log.log(FINE, () -> "Got components configs,\n" + configGenerationsString()); return createAndConfigureComponentGraph(snapshot.configs(), fallbackInjector); } private long getBootstrapGeneration() { return retriever.getBootstrapGeneration(); } private long getComponentsGeneration() { return retriever.getComponentsGeneration(); } private String configGenerationsString() { return String.format("bootstrap generation = %d\n" + "components generation: %d\n" + "previous generation: %d", getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration); } private void throwIfPlatformBundlesChanged(ConfigSnapshot snapshot) { var checkPlatformBundles = getConfig(platformBundlesConfigKey, snapshot.configs()).bundlePaths(); if (! checkPlatformBundles.equals(platformBundles)) throw new RuntimeException("Platform bundles are not allowed to change!\nOld: " + platformBundles + "\nNew: " + checkPlatformBundles); } private ComponentGraph createAndConfigureComponentGraph(Map<ConfigKey<? 
extends ConfigInstance>, ConfigInstance> componentsConfigs, Injector fallbackInjector) { ComponentGraph componentGraph = createComponentGraph(componentsConfigs, getComponentsGeneration(), fallbackInjector); componentGraph.setAvailableConfigs(componentsConfigs); return componentGraph; } private void deconstructFailedGraph(ComponentGraph currentGraph, ComponentGraph failedGraph, Collection<Bundle> bundlesFromFailedGraph) { Set<Object> currentComponents = Collections.newSetFromMap(new IdentityHashMap<>(currentGraph.size())); currentComponents.addAll(currentGraph.allConstructedComponentsAndProviders()); List<Object> unusedComponents = new ArrayList<>(); for (Object component : failedGraph.allConstructedComponentsAndProviders()) { if (!currentComponents.contains(component)) unusedComponents.add(component); } destructor.deconstruct(failedGraph.generation(), unusedComponents, bundlesFromFailedGraph); } private Runnable createPreviousGraphDeconstructionTask(ComponentGraph oldGraph, ComponentGraph newGraph, Collection<Bundle> obsoleteBundles) { Map<Object, ?> newComponents = new IdentityHashMap<>(newGraph.size()); for (Object component : newGraph.allConstructedComponentsAndProviders()) newComponents.put(component, null); List<Object> obsoleteComponents = new ArrayList<>(); for (Object component : oldGraph.allConstructedComponentsAndProviders()) if ( ! newComponents.containsKey(component)) obsoleteComponents.add(component); return () -> destructor.deconstruct(oldGraph.generation(), obsoleteComponents, obsoleteBundles); } private Set<Bundle> installApplicationBundles(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs) { ApplicationBundlesConfig applicationBundlesConfig = getConfig(applicationBundlesConfigKey, configsIncludingBootstrapConfigs); return osgi.useApplicationBundles(applicationBundlesConfig.bundles(), getBootstrapGeneration()); } private ComponentGraph createComponentGraph(Map<ConfigKey<? 
extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs, long generation, Injector fallbackInjector) { previousConfigGeneration = generation; ComponentGraph graph = new ComponentGraph(generation); ComponentsConfig componentsConfig = getConfig(componentsConfigKey, configsIncludingBootstrapConfigs); if (componentsConfig == null) { throw new ConfigurationRuntimeException("The set of all configs does not include a valid 'components' config. Config set: " + configsIncludingBootstrapConfigs.keySet()); } addNodes(componentsConfig, graph); injectNodes(componentsConfig, graph); graph.complete(fallbackInjector); return graph; } private void addNodes(ComponentsConfig componentsConfig, ComponentGraph graph) { for (ComponentsConfig.Components config : componentsConfig.components()) { BundleInstantiationSpecification specification = bundleInstantiationSpecification(config); Class<?> componentClass = osgi.resolveClass(specification); Node componentNode = new ComponentNode(specification.id, config.configId(), componentClass, null); graph.add(componentNode); } } private void injectNodes(ComponentsConfig config, ComponentGraph graph) { for (ComponentsConfig.Components component : config.components()) { Node componentNode = ComponentGraph.getNode(graph, component.id()); for (ComponentsConfig.Components.Inject inject : component.inject()) { componentNode.inject(ComponentGraph.getNode(graph, inject.id())); } } } private void invalidateGeneration(long generation, Throwable cause) { leastGeneration = Math.max(retriever.getComponentsGeneration(), retriever.getBootstrapGeneration()) + 1; if (!(cause instanceof InterruptedException) && !(cause instanceof ConfigInterruptedException) && !(cause instanceof SubscriberClosedException)) { log.log(Level.WARNING, newGraphErrorMessage(generation, cause), cause); } } private static String newGraphErrorMessage(long generation, Throwable cause) { String failedFirstMessage = "Failed to set up first component graph"; String 
failedNewMessage = "Failed to set up new component graph"; String constructMessage = " due to error when constructing one of the components"; String retainMessage = ". Retaining previous component generation."; if (generation == 0) { if (cause instanceof ComponentNode.ComponentConstructorException) { return failedFirstMessage + constructMessage; } else { return failedFirstMessage; } } else { if (cause instanceof ComponentNode.ComponentConstructorException) { return failedNewMessage + constructMessage + retainMessage; } else { return failedNewMessage + retainMessage; } } } public void shutdown(ComponentGraph graph) { shutdownConfigRetriever(); if (graph != null) { scheduleGraphForDeconstruction(graph); destructor.shutdown(); } } public void shutdownConfigRetriever() { retriever.shutdown(); } public void reloadConfig(long generation) { subscriberFactory.reloadActiveSubscribers(generation); } private void scheduleGraphForDeconstruction(ComponentGraph graph) { destructor.deconstruct(graph.generation(), graph.allConstructedComponentsAndProviders(), List.of()); } public static <T extends ConfigInstance> T getConfig(ConfigKey<T> key, Map<ConfigKey<? 
extends ConfigInstance>, ConfigInstance> configs) { ConfigInstance inst = configs.get(key); if (inst == null || key.getConfigClass() == null) { throw new RuntimeException("Missing config " + key); } return key.getConfigClass().cast(inst); } private static BundleInstantiationSpecification bundleInstantiationSpecification(ComponentsConfig.Components config) { return BundleInstantiationSpecification.fromStrings(config.id(), config.classId(), config.bundle()); } public static class ComponentGraphResult { private final ComponentGraph newGraph; private final Runnable oldComponentsCleanupTask; public ComponentGraphResult(ComponentGraph newGraph, Runnable oldComponentsCleanupTask) { this.newGraph = newGraph; this.oldComponentsCleanupTask = oldComponentsCleanupTask; } public ComponentGraph newGraph() { return newGraph; } public Runnable oldComponentsCleanupTask() { return oldComponentsCleanupTask; } } }
/**
 * Drives the config-subscription-based lifecycle of the component graph:
 * waits for new config generations, installs/uninstalls OSGi bundles,
 * (re)creates the component graph, and deconstructs obsolete components.
 *
 * NOTE(review): mutable state (platformBundles, generation counters) is accessed
 * without synchronization — presumably a single reconfiguration thread drives
 * this class; confirm against the caller before relying on thread-safety.
 */
class Container {

    private static final Logger log = Logger.getLogger(Container.class.getName());

    private final SubscriberFactory subscriberFactory;
    // Keys for the three bootstrap configs, all bound to the same configId.
    private final ConfigKey<ApplicationBundlesConfig> applicationBundlesConfigKey;
    private final ConfigKey<PlatformBundlesConfig> platformBundlesConfigKey;
    private final ConfigKey<ComponentsConfig> componentsConfigKey;
    private final ComponentDeconstructor destructor;
    private final Osgi osgi;
    private final ConfigRetriever retriever;

    // Platform bundle paths installed at the first generation; throwIfPlatformBundlesChanged
    // enforces that these never change in later generations.
    private List<String> platformBundles;
    // Generation of the configs used for the most recently created graph; -1 before the first graph.
    private long previousConfigGeneration = -1L;
    // Lowest config generation we will accept; bumped past the failed generation by invalidateGeneration.
    private long leastGeneration = -1L;

    public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor destructor, Osgi osgi) {
        this.subscriberFactory = subscriberFactory;
        this.destructor = destructor;
        this.osgi = osgi;

        applicationBundlesConfigKey = new ConfigKey<>(ApplicationBundlesConfig.class, configId);
        platformBundlesConfigKey = new ConfigKey<>(PlatformBundlesConfig.class, configId);
        componentsConfigKey = new ConfigKey<>(ComponentsConfig.class, configId);
        var bootstrapKeys = Set.of(applicationBundlesConfigKey, platformBundlesConfigKey, componentsConfigKey);
        this.retriever = new ConfigRetriever(bootstrapKeys, subscriberFactory);
    }

    /** Constructs every node instance in the graph, aborting promptly if this thread is interrupted. */
    private void constructComponents(ComponentGraph graph) {
        graph.nodes().forEach(n -> {
            if (Thread.interrupted())
                throw new UncheckedInterruptedException("Interrupted while constructing component graph", true);
            n.constructInstance();
        });
    }

    /**
     * Loops on the config retriever until a components-config snapshot arrives, handling any
     * intervening bootstrap snapshots (bundle install + graph re-creation) along the way,
     * then builds and configures the new component graph.
     * Bundles made obsolete by bootstrap snapshots are accumulated into {@code obsoleteBundles}
     * for the caller to clean up.
     */
    private ComponentGraph waitForNewConfigGenAndCreateGraph(
            ComponentGraph graph, Injector fallbackInjector, boolean isInitializing, Collection<Bundle> obsoleteBundles) {
        ConfigSnapshot snapshot;
        while (true) {
            snapshot = retriever.getConfigs(graph.configKeys(), leastGeneration, isInitializing);

            if (log.isLoggable(FINE))
                log.log(FINE, String.format("getConfigAndCreateGraph:\n" + "graph.configKeys = %s\n" + "graph.generation = %s\n" + "snapshot = %s\n",
                                            graph.configKeys(), graph.generation(), snapshot));

            if (snapshot instanceof BootstrapConfigs) {
                // Bootstrap generations must be strictly increasing; anything else means we are out of sequence.
                if (getBootstrapGeneration() <= previousConfigGeneration) {
                    throw new IllegalStateException(String.format(
                            "Got bootstrap configs out of sequence for old config generation %d.\n" + "Previous config generation is %d",
                            getBootstrapGeneration(), previousConfigGeneration));
                }
                log.log(FINE, () -> "Got new bootstrap generation\n" + configGenerationsString());

                if (graph.generation() == 0) {
                    // First generation: install the platform bundles exactly once.
                    platformBundles = getConfig(platformBundlesConfigKey, snapshot.configs()).bundlePaths();
                    osgi.installPlatformBundles(platformBundles);
                } else {
                    // Later generations may not alter the platform bundle set.
                    throwIfPlatformBundlesChanged(snapshot);
                }
                Collection<Bundle> bundlesToRemove = installApplicationBundles(snapshot.configs());
                obsoleteBundles.addAll(bundlesToRemove);

                graph = createComponentGraph(snapshot.configs(), getBootstrapGeneration(), fallbackInjector);
            } else if (snapshot instanceof ComponentsConfigs) {
                break;
            }
        }
        log.log(FINE, () -> "Got components configs,\n" + configGenerationsString());
        return createAndConfigureComponentGraph(snapshot.configs(), fallbackInjector);
    }

    private long getBootstrapGeneration() {
        return retriever.getBootstrapGeneration();
    }

    private long getComponentsGeneration() {
        return retriever.getComponentsGeneration();
    }

    /** Human-readable summary of the current generation numbers, for FINE-level logging. */
    private String configGenerationsString() {
        return String.format("bootstrap generation = %d\n" + "components generation: %d\n" + "previous generation: %d",
                             getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration);
    }

    /** Fails hard if a new bootstrap snapshot carries a different platform-bundle set than the one installed. */
    private void throwIfPlatformBundlesChanged(ConfigSnapshot snapshot) {
        var checkPlatformBundles = getConfig(platformBundlesConfigKey, snapshot.configs()).bundlePaths();
        if (! checkPlatformBundles.equals(platformBundles))
            throw new RuntimeException("Platform bundles are not allowed to change!\nOld: " + platformBundles + "\nNew: " + checkPlatformBundles);
    }

    /** Creates a graph from the components configs and hands the configs to the graph for later injection. */
    private ComponentGraph createAndConfigureComponentGraph(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> componentsConfigs,
                                                            Injector fallbackInjector) {
        ComponentGraph componentGraph = createComponentGraph(componentsConfigs, getComponentsGeneration(), fallbackInjector);
        componentGraph.setAvailableConfigs(componentsConfigs);
        return componentGraph;
    }

    /**
     * Deconstructs the components of a graph that failed to complete, except those that are
     * also present (by identity) in the still-current graph.
     */
    private void deconstructFailedGraph(ComponentGraph currentGraph, ComponentGraph failedGraph, Collection<Bundle> bundlesFromFailedGraph) {
        // Identity semantics: the same component instance may be shared between graphs.
        Set<Object> currentComponents = Collections.newSetFromMap(new IdentityHashMap<>(currentGraph.size()));
        currentComponents.addAll(currentGraph.allConstructedComponentsAndProviders());

        List<Object> unusedComponents = new ArrayList<>();
        for (Object component : failedGraph.allConstructedComponentsAndProviders()) {
            if (!currentComponents.contains(component)) unusedComponents.add(component);
        }
        deconstructComponentsAndBundles(failedGraph.generation(), bundlesFromFailedGraph, unusedComponents);
    }

    private void deconstructComponentsAndBundles(long generation, Collection<Bundle> bundlesFromFailedGraph, List<Object> unusedComponents) {
        destructor.deconstruct(generation, unusedComponents, bundlesFromFailedGraph);
    }

    /**
     * Returns a deferred cleanup task that deconstructs the old graph's components which are not
     * reused (by identity) in the new graph, together with the given obsolete bundles.
     */
    private Runnable createPreviousGraphDeconstructionTask(ComponentGraph oldGraph,
                                                           ComponentGraph newGraph,
                                                           Collection<Bundle> obsoleteBundles) {
        // IdentityHashMap used as an identity set; values are irrelevant.
        Map<Object, ?> newComponents = new IdentityHashMap<>(newGraph.size());
        for (Object component : newGraph.allConstructedComponentsAndProviders())
            newComponents.put(component, null);

        List<Object> obsoleteComponents = new ArrayList<>();
        for (Object component : oldGraph.allConstructedComponentsAndProviders())
            if ( ! newComponents.containsKey(component)) obsoleteComponents.add(component);

        return () -> destructor.deconstruct(oldGraph.generation(), obsoleteComponents, obsoleteBundles);
    }

    /** Installs the application bundles from the given bootstrap configs; returns the bundles to remove. */
    private Set<Bundle> installApplicationBundles(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs) {
        ApplicationBundlesConfig applicationBundlesConfig = getConfig(applicationBundlesConfigKey, configsIncludingBootstrapConfigs);
        return osgi.useApplicationBundles(applicationBundlesConfig.bundles(), getBootstrapGeneration());
    }

    /** Builds (but does not construct instances of) the component graph described by the components config. */
    private ComponentGraph createComponentGraph(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs,
                                                long generation, Injector fallbackInjector) {
        previousConfigGeneration = generation;

        ComponentGraph graph = new ComponentGraph(generation);
        ComponentsConfig componentsConfig = getConfig(componentsConfigKey, configsIncludingBootstrapConfigs);
        if (componentsConfig == null) {
            throw new ConfigurationRuntimeException("The set of all configs does not include a valid 'components' config. Config set: "
                                                    + configsIncludingBootstrapConfigs.keySet());
        }
        addNodes(componentsConfig, graph);
        injectNodes(componentsConfig, graph);

        graph.complete(fallbackInjector);
        return graph;
    }

    /** Adds one node per declared component, resolving its class through OSGi. */
    private void addNodes(ComponentsConfig componentsConfig, ComponentGraph graph) {
        for (ComponentsConfig.Components config : componentsConfig.components()) {
            BundleInstantiationSpecification specification = bundleInstantiationSpecification(config);
            Class<?> componentClass = osgi.resolveClass(specification);
            Node componentNode = new ComponentNode(specification.id, config.configId(), componentClass, null);
            graph.add(componentNode);
        }
    }

    /** Wires up the explicitly declared injections between component nodes. */
    private void injectNodes(ComponentsConfig config, ComponentGraph graph) {
        for (ComponentsConfig.Components component : config.components()) {
            Node componentNode = ComponentGraph.getNode(graph, component.id());
            for (ComponentsConfig.Components.Inject inject : component.inject()) {
                componentNode.inject(ComponentGraph.getNode(graph, inject.id()));
            }
        }
    }

    /**
     * Marks the failed generation as unusable by raising leastGeneration past it, and logs the
     * failure unless it was caused by an (expected) interruption or subscriber shutdown.
     */
    private void invalidateGeneration(long generation, Throwable cause) {
        leastGeneration = Math.max(retriever.getComponentsGeneration(), retriever.getBootstrapGeneration()) + 1;
        if (!(cause instanceof InterruptedException) && !(cause instanceof ConfigInterruptedException) && !(cause instanceof SubscriberClosedException)) {
            log.log(Level.WARNING, newGraphErrorMessage(generation, cause), cause);
        }
    }

    /** Builds the warning message for a failed graph setup; generation 0 means the very first graph. */
    private static String newGraphErrorMessage(long generation, Throwable cause) {
        String failedFirstMessage = "Failed to set up first component graph";
        String failedNewMessage = "Failed to set up new component graph";
        String constructMessage = " due to error when constructing one of the components";
        String retainMessage = ". Retaining previous component generation.";

        if (generation == 0) {
            if (cause instanceof ComponentNode.ComponentConstructorException) {
                return failedFirstMessage + constructMessage;
            } else {
                return failedFirstMessage;
            }
        } else {
            if (cause instanceof ComponentNode.ComponentConstructorException) {
                return failedNewMessage + constructMessage + retainMessage;
            } else {
                return failedNewMessage + retainMessage;
            }
        }
    }

    /** Shuts down config subscription, deconstructs all components of the given graph, then the destructor. */
    public void shutdown(ComponentGraph graph) {
        shutdownConfigRetriever();
        if (graph != null) {
            deconstructComponentsAndBundles(graph.generation(), List.of(), graph.allConstructedComponentsAndProviders());
            destructor.shutdown();
        }
    }

    public void shutdownConfigRetriever() {
        retriever.shutdown();
    }

    /** Asks active config subscribers to reload at the given generation. */
    public void reloadConfig(long generation) {
        subscriberFactory.reloadActiveSubscribers(generation);
    }

    /** Looks up and casts the config for the given key; throws if it is missing. */
    public static <T extends ConfigInstance> T getConfig(ConfigKey<T> key, Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configs) {
        ConfigInstance inst = configs.get(key);
        if (inst == null || key.getConfigClass() == null) {
            throw new RuntimeException("Missing config " + key);
        }
        return key.getConfigClass().cast(inst);
    }

    private static BundleInstantiationSpecification bundleInstantiationSpecification(ComponentsConfig.Components config) {
        return BundleInstantiationSpecification.fromStrings(config.id(), config.classId(), config.bundle());
    }

    /** Pairs a newly created graph with the deferred task that cleans up the previous one. */
    public static class ComponentGraphResult {
        private final ComponentGraph newGraph;
        private final Runnable oldComponentsCleanupTask;

        public ComponentGraphResult(ComponentGraph newGraph, Runnable oldComponentsCleanupTask) {
            this.newGraph = newGraph;
            this.oldComponentsCleanupTask = oldComponentsCleanupTask;
        }

        public ComponentGraph newGraph() { return newGraph; }

        public Runnable oldComponentsCleanupTask() { return oldComponentsCleanupTask; }
    }
}
Perhaps this should instead be a check on whether the change's platform is compatible with the compile version. That check could also be done outside all of this. Let me fix it.
/**
 * Returns the given change, possibly with a compatibility platform added, if one is required
 * for the change's revision to roll out to the given instance.
 *
 * Fix: the original never checked whether the platform already carried by the change is itself
 * compatible with the revision's compile version — it only kept the change if that platform
 * happened to equal the first policy target (via {@code .filter(platform::equals)}), silently
 * overriding any other compatible, deliberately chosen platform with
 * {@code change.withoutPin().with(platform)}. We now return early in that case.
 */
public Change withCompatibilityPlatform(Change change, InstanceName instance) {
    if (change.revision().isEmpty()) return change;

    Optional<Version> compileVersion = change.revision()
                                             .map(application.revisions()::get)
                                             .flatMap(ApplicationVersion::compileVersion);
    VersionCompatibility compatibility = versionCompatibility.apply(instance);
    // With no known compile version, every platform is acceptable.
    Predicate<Version> compatibleWithCompileVersion = version -> compileVersion.map(compiled -> compatibility.accept(version, compiled)).orElse(true);

    // The change already targets a platform compatible with the compile version: nothing to add.
    if (change.platform().map(compatibleWithCompileVersion::test).orElse(false))
        return change;

    // A compatibility platform is needed when there are no production deployments yet, or when
    // some current production deployment of this instance is on an incompatible platform.
    if (   application.productionDeployments().isEmpty()
        || application.productionDeployments().getOrDefault(instance, List.of()).stream()
                      .anyMatch(deployment -> ! compatibleWithCompileVersion.test(deployment.version()))) {
        // Pick the first (highest, per targetsForPolicy ordering) compatible policy target.
        for (Version platform : targetsForPolicy(versionStatus, systemVersion, application.deploymentSpec().requireInstance(instance).upgradePolicy()))
            if (compatibleWithCompileVersion.test(platform))
                return change.withoutPin().with(platform);
    }

    return change;
}
.filter(platform::equals)
/**
 * Returns the given change, possibly with a compatibility platform added, if one is required
 * for the change's revision to roll out to the given instance.
 *
 * A platform is added only when the change does not already carry a compatible one, and either
 * no production deployments exist yet, or some production deployment of this instance runs an
 * incompatible platform version.
 */
public Change withCompatibilityPlatform(Change change, InstanceName instance) {
    if (change.revision().isEmpty()) return change;

    Optional<Version> compileVersion = change.revision()
                                             .map(application.revisions()::get)
                                             .flatMap(ApplicationVersion::compileVersion);
    VersionCompatibility compatibility = versionCompatibility.apply(instance);
    // Unknown compile version means any platform is considered compatible.
    Predicate<Version> compatibleWithCompileVersion = version -> compileVersion.map(compiled -> compatibility.accept(version, compiled)).orElse(true);

    // The change already targets a compatible platform: keep it untouched.
    if (change.platform().map(compatibleWithCompileVersion::test).orElse(false))
        return change;

    if (   application.productionDeployments().isEmpty()
        || application.productionDeployments().getOrDefault(instance, List.of()).stream()
                      .anyMatch(deployment -> ! compatibleWithCompileVersion.test(deployment.version()))) {
        // First compatible target in policy order wins; presumably targetsForPolicy returns
        // versions by descending version number — confirm against its implementation.
        for (Version platform : targetsForPolicy(versionStatus, systemVersion, application.deploymentSpec().requireInstance(instance).upgradePolicy()))
            if (compatibleWithCompileVersion.test(platform))
                return change.withoutPin().with(platform);
    }

    return change;
}
class DeploymentStatus { private static <T> List<T> union(List<T> first, List<T> second) { return Stream.concat(first.stream(), second.stream()).distinct().collect(toUnmodifiableList()); } private final Application application; private final JobList allJobs; private final VersionStatus versionStatus; private final Version systemVersion; private final Function<InstanceName, VersionCompatibility> versionCompatibility; private final ZoneRegistry zones; private final Instant now; private final Map<JobId, StepStatus> jobSteps; private final List<StepStatus> allSteps; public DeploymentStatus(Application application, Function<JobId, JobStatus> allJobs, ZoneRegistry zones, VersionStatus versionStatus, Version systemVersion, Function<InstanceName, VersionCompatibility> versionCompatibility, Instant now) { this.application = requireNonNull(application); this.zones = zones; this.versionStatus = requireNonNull(versionStatus); this.systemVersion = requireNonNull(systemVersion); this.versionCompatibility = versionCompatibility; this.now = requireNonNull(now); List<StepStatus> allSteps = new ArrayList<>(); Map<JobId, JobStatus> jobs = new HashMap<>(); this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps, job -> jobs.computeIfAbsent(job, allJobs)); this.allSteps = Collections.unmodifiableList(allSteps); this.allJobs = JobList.from(jobSteps.keySet().stream().map(allJobs).collect(toList())); } private JobType systemTest(JobType dependent) { return JobType.systemTest(zones, dependent == null ? null : findCloud(dependent)); } private JobType stagingTest(JobType dependent) { return JobType.stagingTest(zones, dependent == null ? null : findCloud(dependent)); } /** The application this deployment status concerns. */ public Application application() { return application; } /** A filterable list of the status of all jobs for this application. 
*/ public JobList jobs() { return allJobs; } /** Whether any jobs both dependent on the dependency, and a dependency for the dependent, are failing. */ private boolean hasFailures(StepStatus dependency, StepStatus dependent) { Set<StepStatus> dependents = new HashSet<>(); fillDependents(dependency, new HashSet<>(), dependents, dependent); Set<JobId> criticalJobs = dependents.stream().flatMap(step -> step.job().stream()).collect(toSet()); return ! allJobs.matching(job -> criticalJobs.contains(job.id())) .failingHard() .isEmpty(); } private boolean fillDependents(StepStatus dependency, Set<StepStatus> visited, Set<StepStatus> dependents, StepStatus current) { if (visited.contains(current)) return dependents.contains(current); if (dependency == current) dependents.add(current); else for (StepStatus dep : current.dependencies) if (fillDependents(dependency, visited, dependents, dep)) dependents.add(current); visited.add(current); return dependents.contains(current); } /** Whether any job is failing on versions selected by the given filter, with errors other than lack of capacity in a test zone.. */ public boolean hasFailures(Predicate<RevisionId> revisionFilter) { return ! allJobs.failingHard() .matching(job -> revisionFilter.test(job.lastTriggered().get().versions().targetRevision())) .isEmpty(); } /** Whether any jobs of this application are failing with other errors than lack of capacity in a test zone. */ public boolean hasFailures() { return ! allJobs.failingHard().isEmpty(); } /** All job statuses, by job type, for the given instance. */ public Map<JobType, JobStatus> instanceJobs(InstanceName instance) { return allJobs.asList().stream() .filter(job -> job.id().application().equals(application.id().instance(instance))) .collect(CustomCollectors.toLinkedMap(job -> job.id().type(), Function.identity())); } /** Filterable job status lists for each instance of this application. 
*/ public Map<ApplicationId, JobList> instanceJobs() { return allJobs.groupingBy(job -> job.id().application()); } /** Returns change potentially with a compatibility platform added, if required for the change to roll out to the given instance. */ /** Returns target versions for given confidence, by descending version number. */ public static List<Version> targetsForPolicy(VersionStatus versions, Version systemVersion, DeploymentSpec.UpgradePolicy policy) { if (policy == DeploymentSpec.UpgradePolicy.canary) return List.of(systemVersion); VespaVersion.Confidence target = policy == DeploymentSpec.UpgradePolicy.defaultPolicy ? VespaVersion.Confidence.normal : VespaVersion.Confidence.high; return versions.deployableVersions().stream() .filter(version -> version.confidence().equalOrHigherThan(target)) .map(VespaVersion::versionNumber) .sorted(reverseOrder()) .collect(Collectors.toList()); } /** * The set of jobs that need to run for the changes of each instance of the application to be considered complete, * and any test jobs for any outstanding change, which will likely be needed to later deploy this change. */ public Map<JobId, List<Job>> jobsToRun() { if (application.revisions().last().isEmpty()) return Map.of(); Map<InstanceName, Change> changes = new LinkedHashMap<>(); for (InstanceName instance : application.deploymentSpec().instanceNames()) changes.put(instance, application.require(instance).change()); Map<JobId, List<Job>> jobs = jobsToRun(changes); Map<InstanceName, Change> outstandingChanges = new LinkedHashMap<>(); for (InstanceName instance : application.deploymentSpec().instanceNames()) { Change outstanding = outstandingChange(instance); if (outstanding.hasTargets()) outstandingChanges.put(instance, outstanding.onTopOf(application.require(instance).change())); } var testJobs = jobsToRun(outstandingChanges, true).entrySet().stream() .filter(entry -> ! 
entry.getKey().type().isProduction()); return Stream.concat(jobs.entrySet().stream(), testJobs) .collect(collectingAndThen(toMap(Map.Entry::getKey, Map.Entry::getValue, DeploymentStatus::union, LinkedHashMap::new), Collections::unmodifiableMap)); } private Map<JobId, List<Job>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) { if (application.revisions().last().isEmpty()) return Map.of(); Map<JobId, List<Job>> productionJobs = new LinkedHashMap<>(); changes.forEach((instance, change) -> productionJobs.putAll(productionJobs(instance, change, eagerTests))); Map<JobId, List<Job>> testJobs = testJobs(productionJobs); Map<JobId, List<Job>> jobs = new LinkedHashMap<>(testJobs); jobs.putAll(productionJobs); jobSteps.forEach((job, step) -> { if ( ! step.isDeclared() || job.type().isProduction() || jobs.containsKey(job)) return; Change change = changes.get(job.application().instance()); if (change == null || ! change.hasTargets()) return; Collection<Optional<JobId>> firstProductionJobsWithDeployment = jobSteps.keySet().stream() .filter(jobId -> jobId.type().isProduction() && jobId.type().isDeployment()) .filter(jobId -> deploymentFor(jobId).isPresent()) .collect(groupingBy(jobId -> findCloud(jobId.type()), Collectors.reducing((o, n) -> o))) .values(); if (firstProductionJobsWithDeployment.isEmpty()) firstProductionJobsWithDeployment = List.of(Optional.empty()); for (Optional<JobId> firstProductionJobWithDeploymentInCloud : firstProductionJobsWithDeployment) { Versions versions = Versions.from(change, application, firstProductionJobWithDeploymentInCloud.flatMap(this::deploymentFor), fallbackPlatform(change, job)); if (step.completedAt(change, firstProductionJobWithDeploymentInCloud).isEmpty()) { CloudName cloud = firstProductionJobWithDeploymentInCloud.map(JobId::type).map(this::findCloud).orElse(zones.systemZone().getCloudName()); JobType typeWithZone = job.type().isSystemTest() ? 
JobType.systemTest(zones, cloud) : JobType.stagingTest(zones, cloud); jobs.merge(job, List.of(new Job(typeWithZone, versions, step.readyAt(change), change)), DeploymentStatus::union); } } }); return Collections.unmodifiableMap(jobs); } /** Fall back to the newest, deployable platform, which is compatible with what we want to deploy. */ public Version fallbackPlatform(Change change, JobId job) { Optional<Version> compileVersion = change.revision().map(application.revisions()::get).flatMap(ApplicationVersion::compileVersion); if (compileVersion.isEmpty()) return systemVersion; for (VespaVersion version : reversed(versionStatus.deployableVersions())) if (versionCompatibility.apply(job.application().instance()).accept(version.versionNumber(), compileVersion.get())) return version.versionNumber(); throw new IllegalArgumentException("no legal platform version exists in this system for compile version " + compileVersion.get()); } /** The set of jobs that need to run for the given changes to be considered complete. */ public boolean hasCompleted(InstanceName instance, Change change) { if ( ! application.deploymentSpec().requireInstance(instance).concerns(prod)) { if (newestTested(instance, run -> run.versions().targetRevision()).map(change::downgrades).orElse(false)) return true; if (newestTested(instance, run -> run.versions().targetPlatform()).map(change::downgrades).orElse(false)) return true; } return jobsToRun(Map.of(instance, change), false).isEmpty(); } /** The set of jobs that need to run for the given changes to be considered complete. */ private Map<JobId, List<Job>> jobsToRun(Map<InstanceName, Change> changes) { return jobsToRun(changes, false); } /** The step status for all steps in the deployment spec of this, which are jobs, in the same order as in the deployment spec. 
*/ public Map<JobId, StepStatus> jobSteps() { return jobSteps; } public Map<InstanceName, StepStatus> instanceSteps() { ImmutableMap.Builder<InstanceName, StepStatus> instances = ImmutableMap.builder(); for (StepStatus status : allSteps) if (status instanceof InstanceStatus) instances.put(status.instance(), status); return instances.build(); } /** The step status for all relevant steps in the deployment spec of this, in the same order as in the deployment spec. */ public List<StepStatus> allSteps() { return allSteps; } public Optional<Deployment> deploymentFor(JobId job) { return Optional.ofNullable(application.require(job.application().instance()) .deployments().get(job.type().zone())); } private <T extends Comparable<T>> Optional<T> newestTested(InstanceName instance, Function<Run, T> runMapper) { Set<CloudName> clouds = Stream.concat(Stream.of(zones.systemZone().getCloudName()), jobSteps.keySet().stream() .filter(job -> job.type().isProduction()) .map(job -> findCloud(job.type()))) .collect(toSet()); List<ZoneId> testZones = new ArrayList<>(); if (application.deploymentSpec().requireInstance(instance).concerns(test)) for (CloudName cloud: clouds) testZones.add(JobType.systemTest(zones, cloud).zone()); if (application.deploymentSpec().requireInstance(instance).concerns(staging)) for (CloudName cloud: clouds) testZones.add(JobType.stagingTest(zones, cloud).zone()); Map<ZoneId, Optional<T>> newestPerZone = instanceJobs().get(application.id().instance(instance)) .type(systemTest(null), stagingTest(null)) .asList().stream().flatMap(jobs -> jobs.runs().values().stream()) .filter(Run::hasSucceeded) .collect(groupingBy(run -> run.id().type().zone(), mapping(runMapper, Collectors.maxBy(naturalOrder())))); return newestPerZone.keySet().containsAll(testZones) ? testZones.stream().map(newestPerZone::get) .reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty() : n.get().compareTo(o.get()) < 0 ? 
n : o) .orElse(Optional.empty()) : Optional.empty(); } /** * The change to a revision which all dependencies of the given instance has completed, * which does not downgrade any deployments in the instance, * which is not already rolling out to the instance, and * which causes at least one job to run if deployed to the instance. * For the "exclusive" revision upgrade policy it is the oldest such revision; otherwise, it is the latest. */ public Change outstandingChange(InstanceName instance) { StepStatus status = instanceSteps().get(instance); if (status == null) return Change.empty(); DeploymentInstanceSpec spec = application.deploymentSpec().requireInstance(instance); boolean ascending = next == spec.revisionTarget(); int cumulativeRisk = 0; int nextRisk = 0; int skippedCumulativeRisk = 0; Instant readySince = now; Optional<RevisionId> newestRevision = application.productionDeployments() .getOrDefault(instance, List.of()).stream() .map(Deployment::revision).max(naturalOrder()); Change candidate = Change.empty(); for (ApplicationVersion version : application.revisions().deployable(ascending)) { Change change = Change.of(version.id()); if ( newestRevision.isPresent() && change.downgrades(newestRevision.get()) || ! application.require(instance).change().revision().map(change::upgrades).orElse(true) || hasCompleted(instance, change)) { if (ascending) continue; else return Change.empty(); } skippedCumulativeRisk += version.risk(); nextRisk = nextRisk > 0 ? nextRisk : version.risk(); Optional<Instant> readyAt = status.dependenciesCompletedAt(Change.of(version.id()), Optional.empty()); if (readyAt.map(now::isBefore).orElse(true)) continue; cumulativeRisk += skippedCumulativeRisk; skippedCumulativeRisk = 0; nextRisk = 0; if (cumulativeRisk >= spec.maxRisk()) return candidate.equals(Change.empty()) ? 
change : candidate; if (readyAt.get().isBefore(readySince)) readySince = readyAt.get(); candidate = change; } return instanceJobs(instance).values().stream().allMatch(jobs -> jobs.lastTriggered().isEmpty()) || cumulativeRisk >= spec.minRisk() || cumulativeRisk + nextRisk > spec.maxRisk() || ! now.isBefore(readySince.plus(Duration.ofHours(spec.maxIdleHours()))) ? candidate : Change.empty(); } /** Earliest instant when job was triggered with given versions, or both system and staging tests were successful. */ public Optional<Instant> verifiedAt(JobId job, Versions versions) { Optional<Instant> triggeredAt = allJobs.get(job) .flatMap(status -> status.runs().values().stream() .filter(run -> run.versions().equals(versions)) .findFirst()) .map(Run::start); Optional<Instant> systemTestedAt = testedAt(job.application(), systemTest(job.type()), versions); Optional<Instant> stagingTestedAt = testedAt(job.application(), stagingTest(job.type()), versions); if (systemTestedAt.isEmpty() || stagingTestedAt.isEmpty()) return triggeredAt; Optional<Instant> testedAt = systemTestedAt.get().isAfter(stagingTestedAt.get()) ? systemTestedAt : stagingTestedAt; return triggeredAt.isPresent() && triggeredAt.get().isBefore(testedAt.get()) ? triggeredAt : testedAt; } /** Earliest instant when versions were tested for the given instance */ private Optional<Instant> testedAt(ApplicationId instance, JobType type, Versions versions) { return declaredTest(instance, type).map(__ -> allJobs.instance(instance.instance())) .orElse(allJobs) .type(type).asList().stream() .flatMap(status -> RunList.from(status) .on(versions) .matching(run -> run.id().type().zone().equals(type.zone())) .matching(Run::hasSucceeded) .asList().stream() .map(Run::start)) .min(naturalOrder()); } private Map<JobId, List<Job>> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) { Map<JobId, List<Job>> jobs = new LinkedHashMap<>(); jobSteps.forEach((job, step) -> { if ( ! 
job.application().instance().equals(instance) || ! job.type().isProduction()) return; if (step.completedAt(change, Optional.of(job)).isPresent()) return; Optional<Deployment> deployment = deploymentFor(job); Optional<Version> existingPlatform = deployment.map(Deployment::version); Optional<RevisionId> existingRevision = deployment.map(Deployment::revision); boolean deployingCompatibilityChange = areIncompatible(existingPlatform, change.revision(), job) || areIncompatible(change.platform(), existingRevision, job); if (assumeUpgradesSucceed) { if (deployingCompatibilityChange) return; Change currentChange = application.require(instance).change(); Versions target = Versions.from(currentChange, application, deployment, fallbackPlatform(currentChange, job)); existingPlatform = Optional.of(target.targetPlatform()); existingRevision = Optional.of(target.targetRevision()); } List<Job> toRun = new ArrayList<>(); List<Change> changes = deployingCompatibilityChange ? List.of(change) : changes(job, step, change); for (Change partial : changes) { Job jobToRun = new Job(job.type(), Versions.from(partial, application, existingPlatform, existingRevision, fallbackPlatform(partial, job)), step.readyAt(partial, Optional.of(job)), partial); toRun.add(jobToRun); existingPlatform = Optional.of(jobToRun.versions.targetPlatform()); existingRevision = Optional.of(jobToRun.versions.targetRevision()); } jobs.put(job, toRun); }); return jobs; } private boolean areIncompatible(Optional<Version> platform, Optional<RevisionId> revision, JobId job) { Optional<Version> compileVersion = revision.map(application.revisions()::get) .flatMap(ApplicationVersion::compileVersion); return platform.isPresent() && compileVersion.isPresent() && versionCompatibility.apply(job.application().instance()).refuse(platform.get(), compileVersion.get()); } /** Changes to deploy with the given job, possibly split in two steps. 
*/ private List<Change> changes(JobId job, StepStatus step, Change change) { if (change.platform().isEmpty() || change.revision().isEmpty() || change.isPinned()) return List.of(change); if ( step.completedAt(change.withoutApplication(), Optional.of(job)).isPresent() || step.completedAt(change.withoutPlatform(), Optional.of(job)).isPresent()) return List.of(change); JobId deployment = new JobId(job.application(), JobType.deploymentTo(job.type().zone())); UpgradeRollout rollout = application.deploymentSpec().requireInstance(job.application().instance()).upgradeRollout(); if (job.type().isTest()) { Optional<Instant> platformDeployedAt = jobSteps.get(deployment).completedAt(change.withoutApplication(), Optional.of(deployment)); Optional<Instant> revisionDeployedAt = jobSteps.get(deployment).completedAt(change.withoutPlatform(), Optional.of(deployment)); if (platformDeployedAt.isEmpty() && revisionDeployedAt.isPresent()) return List.of(change.withoutPlatform(), change); if (platformDeployedAt.isPresent() && revisionDeployedAt.isEmpty()) { if (jobSteps.get(deployment).readyAt(change, Optional.of(deployment)) .map(ready -> ! now.isBefore(ready)).orElse(false)) { switch (rollout) { case separate: return hasFailures(jobSteps.get(deployment), jobSteps.get(job)) ? 
List.of(change) : List.of(change.withoutApplication(), change); case leading: return List.of(change); case simultaneous: return List.of(change.withoutPlatform(), change); } } return List.of(change.withoutApplication(), change); } } Optional<Instant> platformReadyAt = step.dependenciesCompletedAt(change.withoutApplication(), Optional.of(job)); Optional<Instant> revisionReadyAt = step.dependenciesCompletedAt(change.withoutPlatform(), Optional.of(job)); if (platformReadyAt.isEmpty() && revisionReadyAt.isEmpty()) { switch (rollout) { case separate: return List.of(change.withoutApplication(), change); case leading: return List.of(change); case simultaneous: return List.of(change.withoutPlatform(), change); } } if (platformReadyAt.isEmpty()) return List.of(change.withoutPlatform(), change); if (revisionReadyAt.isEmpty()) { return List.of(change.withoutApplication(), change); } boolean platformReadyFirst = platformReadyAt.get().isBefore(revisionReadyAt.get()); boolean revisionReadyFirst = revisionReadyAt.get().isBefore(platformReadyAt.get()); boolean failingUpgradeOnlyTests = ! jobs().type(systemTest(job.type()), stagingTest(job.type())) .failingHardOn(Versions.from(change.withoutApplication(), application, deploymentFor(job), systemVersion)) .isEmpty(); switch (rollout) { case separate: return (platformReadyFirst || platformReadyAt.get().equals(Instant.EPOCH)) ? step.job().flatMap(jobs()::get).flatMap(JobStatus::firstFailing).isPresent() || failingUpgradeOnlyTests ? List.of(change) : List.of(change.withoutApplication(), change) : revisionReadyFirst ? List.of(change.withoutPlatform(), change) : List.of(change); case leading: return List.of(change); case simultaneous: return platformReadyFirst ? List.of(change) : List.of(change.withoutPlatform(), change); default: throw new IllegalStateException("Unknown upgrade rollout policy"); } } /** The test jobs that need to run prior to the given production deployment jobs. 
*/
public Map<JobId, List<Job>> testJobs(Map<JobId, List<Job>> jobs) {
    Map<JobId, List<Job>> testJobs = new LinkedHashMap<>();
    // First pass: use tests declared in the same instance as the production job, where present.
    jobs.forEach((job, versionsList) -> {
        for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) {
            if (job.type().isProduction() && job.type().isDeployment()) {
                declaredTest(job.application(), testType).ifPresent(testJob -> {
                    for (Job productionJob : versionsList)
                        if (allJobs.successOn(testType, productionJob.versions())
                                   .instance(testJob.application().instance())
                                   .asList().isEmpty())
                            testJobs.merge(testJob,
                                           List.of(new Job(testJob.type(),
                                                           productionJob.versions(),
                                                           jobSteps().get(testJob).readyAt(productionJob.change),
                                                           productionJob.change)),
                                           DeploymentStatus::union);
                });
            }
        }
    });
    // Second pass: versions not yet covered by any test fall back to the first declared, or an implicit, test.
    jobs.forEach((job, versionsList) -> {
        for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) {
            for (Job productionJob : versionsList)
                if (   job.type().isProduction() && job.type().isDeployment()
                    && allJobs.successOn(testType, productionJob.versions()).asList().isEmpty()
                    && testJobs.keySet().stream()
                               .noneMatch(test ->    test.type().equals(testType)
                                                  && test.type().zone().equals(testType.zone())
                                                  && testJobs.get(test).stream().anyMatch(testJob -> testJob.versions().equals(productionJob.versions())))) {
                    JobId testJob = firstDeclaredOrElseImplicitTest(testType);
                    testJobs.merge(testJob,
                                   List.of(new Job(testJob.type(),
                                                   productionJob.versions(),
                                                   jobSteps.get(testJob).readyAt(productionJob.change),
                                                   productionJob.change)),
                                   DeploymentStatus::union);
                }
        }
    });
    return Collections.unmodifiableMap(testJobs);
}

/** The cloud of the zone the given job deploys to, or the system cloud if that zone is unknown. */
private CloudName findCloud(JobType job) {
    return zones.zones().all().get(job.zone()).map(ZoneApi::getCloudName).orElse(zones.systemZone().getCloudName());
}

/** The first job of the given test type across instances, preferring declared tests over implicit ones. */
private JobId firstDeclaredOrElseImplicitTest(JobType testJob) {
    return application.deploymentSpec().instanceNames().stream()
                      .map(name -> new JobId(application.id().instance(name), testJob))
                      .filter(jobSteps::containsKey)
                      .min(comparing(id -> ! jobSteps.get(id).isDeclared())).orElseThrow();
}

/** JobId of any declared test of the given type, for the given instance. */
private Optional<JobId> declaredTest(ApplicationId instanceId, JobType testJob) {
    JobId jobId = new JobId(instanceId, testJob);
    return jobSteps.containsKey(jobId) && jobSteps.get(jobId).isDeclared() ? Optional.of(jobId)
                                                                           : Optional.empty();
}

/** A DAG of the dependencies between the primitive steps in the spec, with iteration order equal to declaration order. */
private Map<JobId, StepStatus> jobDependencies(DeploymentSpec spec, List<StepStatus> allSteps, Function<JobId, JobStatus> jobs) {
    if (DeploymentSpec.empty.equals(spec))
        return Map.of();

    Map<JobId, StepStatus> dependencies = new LinkedHashMap<>();
    List<StepStatus> previous = List.of();
    for (DeploymentSpec.Step step : spec.steps())
        previous = fillStep(dependencies, allSteps, step, previous, null, jobs,
                            instanceWithImplicitTest(test, spec), instanceWithImplicitTest(staging, spec));

    return Collections.unmodifiableMap(dependencies);
}

/** The first instance of the spec, when no instance declares the given environment; null otherwise. */
private static InstanceName instanceWithImplicitTest(Environment environment, DeploymentSpec spec) {
    InstanceName first = null;
    for (DeploymentInstanceSpec step : spec.instances()) {
        if (step.concerns(environment)) return null;
        first = first != null ? first : step.name();
    }
    return first;
}

/** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */
private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps, DeploymentSpec.Step step,
                                  List<StepStatus> previous, InstanceName instance, Function<JobId, JobStatus> jobs,
                                  InstanceName implicitSystemTest, InstanceName implicitStagingTest) {
    // Leaf steps (zones, tests and delays) are added directly; composite steps recurse further below.
    if (step.steps().isEmpty() && ! (step instanceof DeploymentInstanceSpec)) {
        if (instance == null)
            return previous;

        if ( !
step.delay().isZero()) {
            StepStatus stepStatus = new DelayStatus((DeploymentSpec.Delay) step, previous, instance);
            allSteps.add(stepStatus);
            return List.of(stepStatus);
        }

        JobType jobType;
        JobId jobId;
        StepStatus stepStatus;
        if (step.concerns(test) || step.concerns(staging)) {
            jobType = step.concerns(test) ? systemTest(null) : stagingTest(null);
            jobId = new JobId(application.id().instance(instance), jobType);
            stepStatus = JobStepStatus.ofTestDeployment((DeclaredZone) step, List.of(), this, jobs.apply(jobId), true);
            // Test deployments accumulate with their predecessors rather than replacing them.
            previous = new ArrayList<>(previous);
            previous.add(stepStatus);
        }
        else if (step.isTest()) {
            jobType = JobType.test(((DeclaredTest) step).region());
            jobId = new JobId(application.id().instance(instance), jobType);
            stepStatus = JobStepStatus.ofProductionTest((DeclaredTest) step, previous, this, jobs.apply(jobId));
            previous = List.of(stepStatus);
        }
        else if (step.concerns(prod)) {
            jobType = JobType.prod(((DeclaredZone) step).region().get());
            jobId = new JobId(application.id().instance(instance), jobType);
            stepStatus = JobStepStatus.ofProductionDeployment((DeclaredZone) step, previous, this, jobs.apply(jobId));
            previous = List.of(stepStatus);
        }
        else return previous; // Not a job step in this graph; keep the previous dependencies.

        allSteps.add(stepStatus);
        dependencies.put(jobId, stepStatus);
        return previous;
    }

    if (step instanceof DeploymentInstanceSpec) {
        DeploymentInstanceSpec spec = ((DeploymentInstanceSpec) step);
        StepStatus instanceStatus = new InstanceStatus(spec, previous, now, application.require(spec.name()), this);
        instance = spec.name();
        allSteps.add(instanceStatus);
        previous = List.of(instanceStatus);
        // Attach implicit system / staging tests to the first instance which doesn't declare them.
        if (instance.equals(implicitSystemTest)) {
            JobId job = new JobId(application.id().instance(instance), systemTest(null));
            JobStepStatus testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(test), List.of(), this, jobs.apply(job), false);
            dependencies.put(job, testStatus);
            allSteps.add(testStatus);
        }
        if (instance.equals(implicitStagingTest)) {
            JobId job = new JobId(application.id().instance(instance), stagingTest(null));
            JobStepStatus testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(staging), List.of(), this, jobs.apply(job), false);
            dependencies.put(job, testStatus);
            allSteps.add(testStatus);
        }
    }

    if (step.isOrdered()) {
        // Serial composite: each nested step depends on the one before it.
        for (DeploymentSpec.Step nested : step.steps())
            previous = fillStep(dependencies, allSteps, nested, previous, instance, jobs, implicitSystemTest, implicitStagingTest);

        return previous;
    }

    // Parallel composite: all nested steps share the same predecessors.
    List<StepStatus> parallel = new ArrayList<>();
    for (DeploymentSpec.Step nested : step.steps())
        parallel.addAll(fillStep(dependencies, allSteps, nested, previous, instance, jobs, implicitSystemTest, implicitStagingTest));

    return List.copyOf(parallel);
}


public enum StepType {

    /** An instance — completion marks a change as ready for the jobs contained in it. */
    instance,

    /** A timed delay. */
    delay,

    /** A system, staging or production test. */
    test,

    /** A production deployment. */
    deployment,

}

/**
 * Used to represent all steps — explicit and implicit — that may run in order to complete deployment of a change.
 *
 * Each node contains a step describing the node,
 * a list of steps which need to be complete before the step may start,
 * a list of jobs from which completion of the step is computed, and
 * optionally, an instance name used to identify a job type for the step,
 *
 * The completion criterion for each type of step is implemented in subclasses of this.
 */
public static abstract class StepStatus {

    private final StepType type;
    private final DeploymentSpec.Step step;
    private final List<StepStatus> dependencies;
    private final InstanceName instance;

    private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) {
        this.type = requireNonNull(type);
        this.step = requireNonNull(step);
        this.dependencies = List.copyOf(dependencies);
        this.instance = instance;
    }

    /** The type of step this is. */
    public final StepType type() { return type; }

    /** The step defining this.
*/
    public final DeploymentSpec.Step step() { return step; }

    /** The list of steps that need to be complete before this may start. */
    public final List<StepStatus> dependencies() { return dependencies; }

    /** The instance of this. */
    public final InstanceName instance() { return instance; }

    /** The id of the job this corresponds to, if any. */
    public Optional<JobId> job() { return Optional.empty(); }

    /** The time at which this is, or was, complete on the given change and / or versions. */
    public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); }

    /** The time at which this is, or was, complete on the given change and / or versions. */
    abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent);

    /** The time at which this step is ready to run the specified change and / or versions. */
    public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); }

    /** The time at which this step is ready to run the specified change and / or versions. */
    Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
        // Ready when dependencies are complete, further delayed by any block window, pause, or cool-down.
        return dependenciesCompletedAt(change, dependent)
                .map(ready -> Stream.of(blockedUntil(change), pausedUntil(), coolingDownUntil(change))
                                    .flatMap(Optional::stream)
                                    .reduce(ready, maxBy(naturalOrder())));
    }

    /** The time at which all dependencies completed on the given change and / or versions. */
    Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) {
        Instant latest = Instant.EPOCH;
        for (StepStatus step : dependencies) {
            Optional<Instant> completedAt = step.completedAt(change, dependent);
            if (completedAt.isEmpty()) return Optional.empty();
            latest = latest.isBefore(completedAt.get()) ? completedAt.get() : latest;
        }
        return Optional.of(latest);
    }

    /** The time until which this step is blocked by a change blocker. */
    public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); }

    /** The time until which this step is paused by user intervention. */
    public Optional<Instant> pausedUntil() { return Optional.empty(); }

    /** The time until which this step is cooling down, due to consecutive failures. */
    public Optional<Instant> coolingDownUntil(Change change) { return Optional.empty(); }

    /** Whether this step is declared in the deployment spec, or is an implicit step. */
    public boolean isDeclared() { return true; }

}

/** A delay step: completes a fixed duration after its dependencies do. */
private static class DelayStatus extends StepStatus {

    private DelayStatus(DeploymentSpec.Delay step, List<StepStatus> dependencies, InstanceName instance) {
        super(StepType.delay, step, dependencies, instance);
    }

    @Override
    Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
        return readyAt(change, dependent).map(completion -> completion.plus(step().delay()));
    }

}

/** Step status for a whole instance, aggregating over the jobs that instance contains. */
private static class InstanceStatus extends StepStatus {

    private final DeploymentInstanceSpec spec;
    private final Instant now;
    private final Instance instance;
    private final DeploymentStatus status;

    private InstanceStatus(DeploymentInstanceSpec spec, List<StepStatus> dependencies, Instant now,
                           Instance instance, DeploymentStatus status) {
        super(StepType.instance, spec, dependencies, spec.name());
        this.spec = spec;
        this.now = now;
        this.instance = instance;
        this.status = status;
    }

    /** The time at which this step is ready to run the specified change and / or versions. */
    @Override
    public Optional<Instant> readyAt(Change change) {
        // Ready when the earliest-ready of this instance's production jobs is ready.
        return status.jobSteps.keySet().stream()
                              .filter(job -> job.type().isProduction() && job.application().instance().equals(instance.name()))
                              .map(job -> super.readyAt(change, Optional.of(job)))
                              .reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty()
                                                                           : n.get().isBefore(o.get()) ? n : o)
                              .orElseGet(() -> super.readyAt(change, Optional.empty()));
    }

    /**
     * Time of completion of its dependencies, if all parts of the given change are contained in the change
     * for this instance, or if no more jobs should run for this instance for the given change.
     */
    @Override
    Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
        return (   (change.platform().isEmpty() || change.platform().equals(instance.change().platform()))
                && (change.revision().isEmpty() || change.revision().equals(instance.change().revision()))
                || step().steps().stream().noneMatch(step -> step.concerns(prod)))
               ? dependenciesCompletedAt(change, dependent).or(() -> Optional.of(Instant.EPOCH).filter(__ -> change.hasTargets()))
               : Optional.empty();
    }

    @Override
    public Optional<Instant> blockedUntil(Change change) {
        // Scan at most one week ahead, hour by hour, for the first time outside all matching block windows.
        for (Instant current = now; now.plus(Duration.ofDays(7)).isAfter(current); ) {
            boolean blocked = false;
            for (DeploymentSpec.ChangeBlocker blocker : spec.changeBlocker()) {
                while (   blocker.window().includes(current)
                       && now.plus(Duration.ofDays(7)).isAfter(current)
                       && (   change.platform().isPresent() && blocker.blocksVersions()
                           || change.revision().isPresent() && blocker.blocksRevisions())) {
                    blocked = true;
                    current = current.plus(Duration.ofHours(1)).truncatedTo(ChronoUnit.HOURS);
                }
            }
            if ( ! blocked)
                return current == now ?
Optional.empty() : Optional.of(current);
        }
        // Still blocked a week out: report a far-future bound.
        return Optional.of(now.plusSeconds(1 << 30));
    }

}

/** Common base for step statuses which are backed by an actual job. */
private static abstract class JobStepStatus extends StepStatus {

    private final JobStatus job;
    private final DeploymentStatus status;

    private JobStepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, JobStatus job,
                          DeploymentStatus status) {
        super(type, step, dependencies, job.id().application().instance());
        this.job = requireNonNull(job);
        this.status = requireNonNull(status);
    }

    @Override
    public Optional<JobId> job() { return Optional.of(job.id()); }

    @Override
    public Optional<Instant> pausedUntil() {
        return status.application().require(job.id().application().instance()).jobPause(job.id().type());
    }

    @Override
    public Optional<Instant> coolingDownUntil(Change change) {
        // Only cool down a job which is currently failing on the very versions the change targets.
        if (job.lastTriggered().isEmpty()) return Optional.empty();
        if (job.lastCompleted().isEmpty()) return Optional.empty();
        if (job.firstFailing().isEmpty() || ! job.firstFailing().get().hasEnded()) return Optional.empty();
        Versions lastVersions = job.lastCompleted().get().versions();
        if (change.platform().isPresent() && ! change.platform().get().equals(lastVersions.targetPlatform())) return Optional.empty();
        if (change.revision().isPresent() && ! change.revision().get().equals(lastVersions.targetRevision())) return Optional.empty();
        if (job.id().type().environment().isTest() && job.isNodeAllocationFailure()) return Optional.empty();

        Instant firstFailing = job.firstFailing().get().end().get();
        Instant lastCompleted = job.lastCompleted().get().end().get();
        // Cool-down grows with the failure period: ten minutes plus half the time spent failing so far.
        return firstFailing.equals(lastCompleted) ? Optional.of(lastCompleted)
                                                  : Optional.of(lastCompleted.plus(Duration.ofMinutes(10))
                                                                             .plus(Duration.between(firstFailing, lastCompleted)
                                                                                           .dividedBy(2)))
                                                            .filter(status.now::isBefore);
    }

    private static JobStepStatus ofProductionDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                        DeploymentStatus status, JobStatus job) {
        ZoneId zone = ZoneId.from(step.environment(), step.region().get());
        Optional<Deployment> existingDeployment = Optional.ofNullable(status.application().require(job.id().application().instance())
                                                                            .deployments().get(zone));
        return new JobStepStatus(StepType.deployment, step, dependencies, job, status) {
            @Override
            public Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
                // Also requires the versions to have been verified by tests, or triggered previously.
                Optional<Instant> readyAt = super.readyAt(change, dependent);
                Optional<Instant> testedAt = status.verifiedAt(job.id(), Versions.from(change, status.application, existingDeployment, status.fallbackPlatform(change, job.id())));
                if (readyAt.isEmpty() || testedAt.isEmpty()) return Optional.empty();
                return readyAt.get().isAfter(testedAt.get()) ? readyAt : testedAt;
            }

            /** Complete if deployment is on pinned version, and last successful deployment, or if given versions is strictly a downgrade, and this isn't forced by a pin. */
            @Override
            Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                if (   change.isPinned()
                    && change.platform().isPresent()
                    && ! existingDeployment.map(Deployment::version).equals(change.platform()))
                    return Optional.empty();

                if (   change.revision().isPresent()
                    && ! existingDeployment.map(Deployment::revision).equals(change.revision())
                    && dependent.equals(job()))
                    return Optional.empty();

                Change fullChange = status.application().require(job.id().application().instance()).change();
                if (existingDeployment.map(deployment ->    ! (change.upgrades(deployment.version()) || change.upgrades(deployment.revision()))
                                                         &&   (fullChange.downgrades(deployment.version()) || fullChange.downgrades(deployment.revision())))
                                      .orElse(false))
                    return job.lastCompleted().flatMap(Run::end);

                // Walk runs newest to oldest, keeping the end of the oldest succeeding run on matching targets;
                // a run on other targets ends the walk when this job itself is the dependent.
                Optional<Instant> end = Optional.empty();
                for (Run run : job.runs().descendingMap().values()) {
                    if (run.versions().targetsMatch(change)) {
                        if (run.hasSucceeded()) end = run.end();
                    }
                    else if (dependent.equals(job()))
                        break;
                }
                return end;
            }
        };
    }

    private static JobStepStatus ofProductionTest(DeclaredTest step, List<StepStatus> dependencies,
                                                  DeploymentStatus status, JobStatus job) {
        JobId prodId = new JobId(job.id().application(), JobType.deploymentTo(job.id().type().zone()));
        return new JobStepStatus(StepType.test, step, dependencies, job, status) {
            @Override
            Optional<Instant> readyAt(Change change, Optional<JobId> dependent) {
                // Not ready before the production deployment this tests has completed.
                Optional<Instant> readyAt = super.readyAt(change, dependent);
                Optional<Instant> deployedAt = status.jobSteps().get(prodId).completedAt(change, Optional.of(prodId));
                if (readyAt.isEmpty() || deployedAt.isEmpty()) return Optional.empty();
                return readyAt.get().isAfter(deployedAt.get()) ? readyAt : deployedAt;
            }

            @Override
            Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                Optional<Instant> deployedAt = status.jobSteps().get(prodId).completedAt(change, Optional.of(prodId));
                // When asked about this job itself, only the last triggered run counts, and only if started after the deployment.
                return (dependent.equals(job()) ? job.lastTriggered().filter(run -> deployedAt.map(at -> ! run.start().isBefore(at)).orElse(false)).stream()
                                                : job.runs().values().stream())
                        .filter(Run::hasSucceeded)
                        .filter(run -> run.versions().targetsMatch(change))
                        .flatMap(run -> run.end().stream()).findFirst();
            }
        };
    }

    private static JobStepStatus ofTestDeployment(DeclaredZone step, List<StepStatus> dependencies,
                                                  DeploymentStatus status, JobStatus job, boolean declared) {
        return new JobStepStatus(StepType.test, step, dependencies, job, status) {
            @Override
            Optional<Instant> completedAt(Change change, Optional<JobId> dependent) {
                // When run on behalf of a production job, the test must have run in the zone matching that job's cloud.
                Optional<ZoneId> requiredTestZone = dependent.map(dep -> job.id().type().isSystemTest() ? status.systemTest(dep.type()).zone()
                                                                                                        : status.stagingTest(dep.type()).zone());
                return RunList.from(job)
                              .matching(run -> dependent.flatMap(status::deploymentFor)
                                                        .map(deployment -> run.versions().targetsMatch(Versions.from(change,
                                                                                                                     status.application,
                                                                                                                     Optional.of(deployment),
                                                                                                                     status.fallbackPlatform(change, dependent.get()))))
                                                        .orElseGet(() ->    (change.platform().isEmpty() || change.platform().get().equals(run.versions().targetPlatform()))
                                                                         && (change.revision().isEmpty() || change.revision().get().equals(run.versions().targetRevision()))))
                              .matching(Run::hasSucceeded)
                              .matching(run -> requiredTestZone.isEmpty() || requiredTestZone.get().equals(run.id().type().zone()))
                              .asList().stream()
                              .map(run -> run.end().get())
                              .max(naturalOrder());
            }

            @Override
            public boolean isDeclared() { return declared; }
        };
    }

}

/** A job to run: its type, the versions to deploy, when it may start, and the change it is part of. */
public static class Job {

    private final JobType type;
    private final Versions versions;
    private final Optional<Instant> readyAt;
    private final Change change;

    public Job(JobType type, Versions versions, Optional<Instant> readyAt, Change change) {
        this.type = type;
        // NOTE(review): source versions are stripped for system tests — presumably irrelevant there; confirm with Versions.withoutSources.
        this.versions = type.isSystemTest() ? versions.withoutSources() : versions;
        this.readyAt = readyAt;
        this.change = change;
    }

    public JobType type() { return type; }

    public Versions versions() { return versions; }

    public Optional<Instant> readyAt() { return readyAt; }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Job job = (Job) o;
        // Equality compares the job's zone rather than the full type.
        return type.zone().equals(job.type.zone()) && versions.equals(job.versions) && readyAt.equals(job.readyAt) && change.equals(job.change);
    }

    @Override
    public int hashCode() {
        return Objects.hash(type.zone(), versions, readyAt, change);
    }

    @Override
    public String toString() {
        return change + " with versions " + versions + ", ready at " + readyAt;
    }

}

}
class DeploymentStatus { private static <T> List<T> union(List<T> first, List<T> second) { return Stream.concat(first.stream(), second.stream()).distinct().collect(toUnmodifiableList()); } private final Application application; private final JobList allJobs; private final VersionStatus versionStatus; private final Version systemVersion; private final Function<InstanceName, VersionCompatibility> versionCompatibility; private final ZoneRegistry zones; private final Instant now; private final Map<JobId, StepStatus> jobSteps; private final List<StepStatus> allSteps; public DeploymentStatus(Application application, Function<JobId, JobStatus> allJobs, ZoneRegistry zones, VersionStatus versionStatus, Version systemVersion, Function<InstanceName, VersionCompatibility> versionCompatibility, Instant now) { this.application = requireNonNull(application); this.zones = zones; this.versionStatus = requireNonNull(versionStatus); this.systemVersion = requireNonNull(systemVersion); this.versionCompatibility = versionCompatibility; this.now = requireNonNull(now); List<StepStatus> allSteps = new ArrayList<>(); Map<JobId, JobStatus> jobs = new HashMap<>(); this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps, job -> jobs.computeIfAbsent(job, allJobs)); this.allSteps = Collections.unmodifiableList(allSteps); this.allJobs = JobList.from(jobSteps.keySet().stream().map(allJobs).collect(toList())); } private JobType systemTest(JobType dependent) { return JobType.systemTest(zones, dependent == null ? null : findCloud(dependent)); } private JobType stagingTest(JobType dependent) { return JobType.stagingTest(zones, dependent == null ? null : findCloud(dependent)); } /** The application this deployment status concerns. */ public Application application() { return application; } /** A filterable list of the status of all jobs for this application. 
*/
    public JobList jobs() { return allJobs; }

    /** Whether any jobs both dependent on the dependency, and a dependency for the dependent, are failing. */
    private boolean hasFailures(StepStatus dependency, StepStatus dependent) {
        Set<StepStatus> dependents = new HashSet<>();
        fillDependents(dependency, new HashSet<>(), dependents, dependent);
        Set<JobId> criticalJobs = dependents.stream().flatMap(step -> step.job().stream()).collect(toSet());

        return ! allJobs.matching(job -> criticalJobs.contains(job.id()))
                        .failingHard()
                        .isEmpty();
    }

    /** Recursively collects, into dependents, the steps on any dependency path between dependency and current. */
    private boolean fillDependents(StepStatus dependency, Set<StepStatus> visited, Set<StepStatus> dependents, StepStatus current) {
        if (visited.contains(current))
            return dependents.contains(current);

        if (dependency == current)
            dependents.add(current);
        else
            for (StepStatus dep : current.dependencies)
                if (fillDependents(dependency, visited, dependents, dep))
                    dependents.add(current);

        visited.add(current);
        return dependents.contains(current);
    }

    /** Whether any job is failing on versions selected by the given filter, with errors other than lack of capacity in a test zone. */
    public boolean hasFailures(Predicate<RevisionId> revisionFilter) {
        return ! allJobs.failingHard()
                        .matching(job -> revisionFilter.test(job.lastTriggered().get().versions().targetRevision()))
                        .isEmpty();
    }

    /** Whether any jobs of this application are failing with other errors than lack of capacity in a test zone. */
    public boolean hasFailures() {
        return ! allJobs.failingHard().isEmpty();
    }

    /** All job statuses, by job type, for the given instance. */
    public Map<JobType, JobStatus> instanceJobs(InstanceName instance) {
        return allJobs.asList().stream()
                      .filter(job -> job.id().application().equals(application.id().instance(instance)))
                      .collect(CustomCollectors.toLinkedMap(job -> job.id().type(),
                                                            Function.identity()));
    }

    /** Filterable job status lists for each instance of this application. */
    public Map<ApplicationId, JobList> instanceJobs() {
        return allJobs.groupingBy(job -> job.id().application());
    }

    /** Returns target versions for given confidence, by descending version number. */
    public static List<Version> targetsForPolicy(VersionStatus versions, Version systemVersion, DeploymentSpec.UpgradePolicy policy) {
        if (policy == DeploymentSpec.UpgradePolicy.canary)
            return List.of(systemVersion);

        VespaVersion.Confidence target = policy == DeploymentSpec.UpgradePolicy.defaultPolicy ? VespaVersion.Confidence.normal : VespaVersion.Confidence.high;
        return versions.deployableVersions().stream()
                       .filter(version -> version.confidence().equalOrHigherThan(target))
                       .map(VespaVersion::versionNumber)
                       .sorted(reverseOrder())
                       .collect(Collectors.toList());
    }

    /**
     * The set of jobs that need to run for the changes of each instance of the application to be considered complete,
     * and any test jobs for any outstanding change, which will likely be needed to later deploy this change.
     */
    public Map<JobId, List<Job>> jobsToRun() {
        if (application.revisions().last().isEmpty()) return Map.of();

        Map<InstanceName, Change> changes = new LinkedHashMap<>();
        for (InstanceName instance : application.deploymentSpec().instanceNames())
            changes.put(instance, application.require(instance).change());
        Map<JobId, List<Job>> jobs = jobsToRun(changes);

        // Add test jobs for any outstanding change which is not yet rolling out, so tests can run up front.
        Map<InstanceName, Change> outstandingChanges = new LinkedHashMap<>();
        for (InstanceName instance : application.deploymentSpec().instanceNames()) {
            Change outstanding = outstandingChange(instance);
            if (outstanding.hasTargets())
                outstandingChanges.put(instance, outstanding.onTopOf(application.require(instance).change()));
        }
        var testJobs = jobsToRun(outstandingChanges, true).entrySet().stream()
                                                          .filter(entry -> !
entry.getKey().type().isProduction());
        return Stream.concat(jobs.entrySet().stream(), testJobs)
                     .collect(collectingAndThen(toMap(Map.Entry::getKey,
                                                      Map.Entry::getValue,
                                                      DeploymentStatus::union,
                                                      LinkedHashMap::new),
                                                Collections::unmodifiableMap));
    }

    /** The jobs to run for the given changes; eagerTests plans tests against the targets of the current change, assuming it succeeds. */
    private Map<JobId, List<Job>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) {
        if (application.revisions().last().isEmpty()) return Map.of();

        Map<JobId, List<Job>> productionJobs = new LinkedHashMap<>();
        changes.forEach((instance, change) -> productionJobs.putAll(productionJobs(instance, change, eagerTests)));
        Map<JobId, List<Job>> testJobs = testJobs(productionJobs);
        Map<JobId, List<Job>> jobs = new LinkedHashMap<>(testJobs);
        jobs.putAll(productionJobs);
        // Add declared, non-production test jobs which are not already covered by the production jobs above.
        jobSteps.forEach((job, step) -> {
            if ( ! step.isDeclared() || job.type().isProduction() || jobs.containsKey(job))
                return;

            Change change = changes.get(job.application().instance());
            if (change == null || ! change.hasTargets())
                return;

            // One candidate per cloud: the first production job with an existing deployment in each cloud.
            Collection<Optional<JobId>> firstProductionJobsWithDeployment = jobSteps.keySet().stream()
                                                                                    .filter(jobId -> jobId.type().isProduction() && jobId.type().isDeployment())
                                                                                    .filter(jobId -> deploymentFor(jobId).isPresent())
                                                                                    .collect(groupingBy(jobId -> findCloud(jobId.type()),
                                                                                                        Collectors.reducing((o, n) -> o)))
                                                                                    .values();
            if (firstProductionJobsWithDeployment.isEmpty())
                firstProductionJobsWithDeployment = List.of(Optional.empty());

            for (Optional<JobId> firstProductionJobWithDeploymentInCloud : firstProductionJobsWithDeployment) {
                Versions versions = Versions.from(change,
                                                  application,
                                                  firstProductionJobWithDeploymentInCloud.flatMap(this::deploymentFor),
                                                  fallbackPlatform(change, job));
                if (step.completedAt(change, firstProductionJobWithDeploymentInCloud).isEmpty()) {
                    CloudName cloud = firstProductionJobWithDeploymentInCloud.map(JobId::type).map(this::findCloud).orElse(zones.systemZone().getCloudName());
                    JobType typeWithZone = job.type().isSystemTest() ? JobType.systemTest(zones, cloud) : JobType.stagingTest(zones, cloud);
                    jobs.merge(job, List.of(new Job(typeWithZone, versions, step.readyAt(change), change)), DeploymentStatus::union);
                }
            }
        });
        return Collections.unmodifiableMap(jobs);
    }

    /** Fall back to the newest, deployable platform, which is compatible with what we want to deploy. */
    public Version fallbackPlatform(Change change, JobId job) {
        Optional<Version> compileVersion = change.revision().map(application.revisions()::get).flatMap(ApplicationVersion::compileVersion);
        if (compileVersion.isEmpty())
            return systemVersion;

        for (VespaVersion version : reversed(versionStatus.deployableVersions()))
            if (versionCompatibility.apply(job.application().instance()).accept(version.versionNumber(), compileVersion.get()))
                return version.versionNumber();

        throw new IllegalArgumentException("no legal platform version exists in this system for compile version " + compileVersion.get());
    }

    /** Whether the given change is considered complete for the given instance. */
    public boolean hasCompleted(InstanceName instance, Change change) {
        // Instances without production deployments are also complete when a newer target has already been tested.
        if ( ! application.deploymentSpec().requireInstance(instance).concerns(prod)) {
            if (newestTested(instance, run -> run.versions().targetRevision()).map(change::downgrades).orElse(false)) return true;
            if (newestTested(instance, run -> run.versions().targetPlatform()).map(change::downgrades).orElse(false)) return true;
        }

        return jobsToRun(Map.of(instance, change), false).isEmpty();
    }

    /** The set of jobs that need to run for the given changes to be considered complete. */
    private Map<JobId, List<Job>> jobsToRun(Map<InstanceName, Change> changes) {
        return jobsToRun(changes, false);
    }

    /** The step status for all steps in the deployment spec of this, which are jobs, in the same order as in the deployment spec.
*/
    public Map<JobId, StepStatus> jobSteps() { return jobSteps; }

    /** The instance steps of this, by instance name, in deployment spec order. */
    public Map<InstanceName, StepStatus> instanceSteps() {
        ImmutableMap.Builder<InstanceName, StepStatus> instances = ImmutableMap.builder();
        for (StepStatus status : allSteps)
            if (status instanceof InstanceStatus)
                instances.put(status.instance(), status);

        return instances.build();
    }

    /** The step status for all relevant steps in the deployment spec of this, in the same order as in the deployment spec. */
    public List<StepStatus> allSteps() { return allSteps; }

    /** The deployment of this application in the zone of the given job, if any. */
    public Optional<Deployment> deploymentFor(JobId job) {
        return Optional.ofNullable(application.require(job.application().instance())
                                              .deployments().get(job.type().zone()));
    }

    /** The newest value of runMapper successfully tested in every required test zone of the instance, or empty if some zone is untested. */
    private <T extends Comparable<T>> Optional<T> newestTested(InstanceName instance, Function<Run, T> runMapper) {
        Set<CloudName> clouds = Stream.concat(Stream.of(zones.systemZone().getCloudName()),
                                              jobSteps.keySet().stream()
                                                      .filter(job -> job.type().isProduction())
                                                      .map(job -> findCloud(job.type())))
                                      .collect(toSet());
        List<ZoneId> testZones = new ArrayList<>();
        if (application.deploymentSpec().requireInstance(instance).concerns(test))
            for (CloudName cloud: clouds) testZones.add(JobType.systemTest(zones, cloud).zone());
        if (application.deploymentSpec().requireInstance(instance).concerns(staging))
            for (CloudName cloud: clouds) testZones.add(JobType.stagingTest(zones, cloud).zone());

        Map<ZoneId, Optional<T>> newestPerZone = instanceJobs().get(application.id().instance(instance))
                                                               .type(systemTest(null), stagingTest(null))
                                                               .asList().stream().flatMap(jobs -> jobs.runs().values().stream())
                                                               .filter(Run::hasSucceeded)
                                                               .collect(groupingBy(run -> run.id().type().zone(),
                                                                                   mapping(runMapper, Collectors.maxBy(naturalOrder()))));
        // Every required zone must be covered; the answer is the smallest of the per-zone maxima.
        return newestPerZone.keySet().containsAll(testZones)
               ? testZones.stream().map(newestPerZone::get)
                          .reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty()
                                                                       : n.get().compareTo(o.get()) < 0 ? n : o)
                          .orElse(Optional.empty())
               : Optional.empty();
    }

    /**
     * The change to a revision which all dependencies of the given instance has completed,
     * which does not downgrade any deployments in the instance,
     * which is not already rolling out to the instance, and
     * which causes at least one job to run if deployed to the instance.
     * For the "exclusive" revision upgrade policy it is the oldest such revision; otherwise, it is the latest.
     */
    public Change outstandingChange(InstanceName instance) {
        StepStatus status = instanceSteps().get(instance);
        if (status == null) return Change.empty();
        DeploymentInstanceSpec spec = application.deploymentSpec().requireInstance(instance);
        boolean ascending = next == spec.revisionTarget();
        int cumulativeRisk = 0;
        int nextRisk = 0;
        int skippedCumulativeRisk = 0;
        Instant readySince = now;

        Optional<RevisionId> newestRevision = application.productionDeployments()
                                                         .getOrDefault(instance, List.of()).stream()
                                                         .map(Deployment::revision).max(naturalOrder());
        Change candidate = Change.empty();
        for (ApplicationVersion version : application.revisions().deployable(ascending)) {
            Change change = Change.of(version.id());
            // Skip revisions which downgrade, do not advance the current change, or are already complete.
            if (   newestRevision.isPresent() && change.downgrades(newestRevision.get())
                || ! application.require(instance).change().revision().map(change::upgrades).orElse(true)
                || hasCompleted(instance, change)) {
                if (ascending) continue;              // Keep looking at newer revisions.
                else return Change.empty();           // Descending: nothing further can be eligible.
            }
            skippedCumulativeRisk += version.risk();
            nextRisk = nextRisk > 0 ? nextRisk : version.risk();
            Optional<Instant> readyAt = status.dependenciesCompletedAt(Change.of(version.id()), Optional.empty());
            if (readyAt.map(now::isBefore).orElse(true)) continue;
            cumulativeRisk += skippedCumulativeRisk;
            skippedCumulativeRisk = 0;
            nextRisk = 0;
            if (cumulativeRisk >= spec.maxRisk())
                return candidate.equals(Change.empty()) ?
change : candidate; if (readyAt.get().isBefore(readySince)) readySince = readyAt.get(); candidate = change; } return instanceJobs(instance).values().stream().allMatch(jobs -> jobs.lastTriggered().isEmpty()) || cumulativeRisk >= spec.minRisk() || cumulativeRisk + nextRisk > spec.maxRisk() || ! now.isBefore(readySince.plus(Duration.ofHours(spec.maxIdleHours()))) ? candidate : Change.empty(); } /** Earliest instant when job was triggered with given versions, or both system and staging tests were successful. */ public Optional<Instant> verifiedAt(JobId job, Versions versions) { Optional<Instant> triggeredAt = allJobs.get(job) .flatMap(status -> status.runs().values().stream() .filter(run -> run.versions().equals(versions)) .findFirst()) .map(Run::start); Optional<Instant> systemTestedAt = testedAt(job.application(), systemTest(job.type()), versions); Optional<Instant> stagingTestedAt = testedAt(job.application(), stagingTest(job.type()), versions); if (systemTestedAt.isEmpty() || stagingTestedAt.isEmpty()) return triggeredAt; Optional<Instant> testedAt = systemTestedAt.get().isAfter(stagingTestedAt.get()) ? systemTestedAt : stagingTestedAt; return triggeredAt.isPresent() && triggeredAt.get().isBefore(testedAt.get()) ? triggeredAt : testedAt; } /** Earliest instant when versions were tested for the given instance */ private Optional<Instant> testedAt(ApplicationId instance, JobType type, Versions versions) { return declaredTest(instance, type).map(__ -> allJobs.instance(instance.instance())) .orElse(allJobs) .type(type).asList().stream() .flatMap(status -> RunList.from(status) .on(versions) .matching(run -> run.id().type().zone().equals(type.zone())) .matching(Run::hasSucceeded) .asList().stream() .map(Run::start)) .min(naturalOrder()); } private Map<JobId, List<Job>> productionJobs(InstanceName instance, Change change, boolean assumeUpgradesSucceed) { Map<JobId, List<Job>> jobs = new LinkedHashMap<>(); jobSteps.forEach((job, step) -> { if ( ! 
job.application().instance().equals(instance) || ! job.type().isProduction()) return; if (step.completedAt(change, Optional.of(job)).isPresent()) return; Optional<Deployment> deployment = deploymentFor(job); Optional<Version> existingPlatform = deployment.map(Deployment::version); Optional<RevisionId> existingRevision = deployment.map(Deployment::revision); boolean deployingCompatibilityChange = areIncompatible(existingPlatform, change.revision(), job) || areIncompatible(change.platform(), existingRevision, job); if (assumeUpgradesSucceed) { if (deployingCompatibilityChange) return; Change currentChange = application.require(instance).change(); Versions target = Versions.from(currentChange, application, deployment, fallbackPlatform(currentChange, job)); existingPlatform = Optional.of(target.targetPlatform()); existingRevision = Optional.of(target.targetRevision()); } List<Job> toRun = new ArrayList<>(); List<Change> changes = deployingCompatibilityChange ? List.of(change) : changes(job, step, change); for (Change partial : changes) { Job jobToRun = new Job(job.type(), Versions.from(partial, application, existingPlatform, existingRevision, fallbackPlatform(partial, job)), step.readyAt(partial, Optional.of(job)), partial); toRun.add(jobToRun); existingPlatform = Optional.of(jobToRun.versions.targetPlatform()); existingRevision = Optional.of(jobToRun.versions.targetRevision()); } jobs.put(job, toRun); }); return jobs; } private boolean areIncompatible(Optional<Version> platform, Optional<RevisionId> revision, JobId job) { Optional<Version> compileVersion = revision.map(application.revisions()::get) .flatMap(ApplicationVersion::compileVersion); return platform.isPresent() && compileVersion.isPresent() && versionCompatibility.apply(job.application().instance()).refuse(platform.get(), compileVersion.get()); } /** Changes to deploy with the given job, possibly split in two steps. 
*/ private List<Change> changes(JobId job, StepStatus step, Change change) { if (change.platform().isEmpty() || change.revision().isEmpty() || change.isPinned()) return List.of(change); if ( step.completedAt(change.withoutApplication(), Optional.of(job)).isPresent() || step.completedAt(change.withoutPlatform(), Optional.of(job)).isPresent()) return List.of(change); JobId deployment = new JobId(job.application(), JobType.deploymentTo(job.type().zone())); UpgradeRollout rollout = application.deploymentSpec().requireInstance(job.application().instance()).upgradeRollout(); if (job.type().isTest()) { Optional<Instant> platformDeployedAt = jobSteps.get(deployment).completedAt(change.withoutApplication(), Optional.of(deployment)); Optional<Instant> revisionDeployedAt = jobSteps.get(deployment).completedAt(change.withoutPlatform(), Optional.of(deployment)); if (platformDeployedAt.isEmpty() && revisionDeployedAt.isPresent()) return List.of(change.withoutPlatform(), change); if (platformDeployedAt.isPresent() && revisionDeployedAt.isEmpty()) { if (jobSteps.get(deployment).readyAt(change, Optional.of(deployment)) .map(ready -> ! now.isBefore(ready)).orElse(false)) { switch (rollout) { case separate: return hasFailures(jobSteps.get(deployment), jobSteps.get(job)) ? 
List.of(change) : List.of(change.withoutApplication(), change); case leading: return List.of(change); case simultaneous: return List.of(change.withoutPlatform(), change); } } return List.of(change.withoutApplication(), change); } } Optional<Instant> platformReadyAt = step.dependenciesCompletedAt(change.withoutApplication(), Optional.of(job)); Optional<Instant> revisionReadyAt = step.dependenciesCompletedAt(change.withoutPlatform(), Optional.of(job)); if (platformReadyAt.isEmpty() && revisionReadyAt.isEmpty()) { switch (rollout) { case separate: return List.of(change.withoutApplication(), change); case leading: return List.of(change); case simultaneous: return List.of(change.withoutPlatform(), change); } } if (platformReadyAt.isEmpty()) return List.of(change.withoutPlatform(), change); if (revisionReadyAt.isEmpty()) { return List.of(change.withoutApplication(), change); } boolean platformReadyFirst = platformReadyAt.get().isBefore(revisionReadyAt.get()); boolean revisionReadyFirst = revisionReadyAt.get().isBefore(platformReadyAt.get()); boolean failingUpgradeOnlyTests = ! jobs().type(systemTest(job.type()), stagingTest(job.type())) .failingHardOn(Versions.from(change.withoutApplication(), application, deploymentFor(job), systemVersion)) .isEmpty(); switch (rollout) { case separate: return (platformReadyFirst || platformReadyAt.get().equals(Instant.EPOCH)) ? step.job().flatMap(jobs()::get).flatMap(JobStatus::firstFailing).isPresent() || failingUpgradeOnlyTests ? List.of(change) : List.of(change.withoutApplication(), change) : revisionReadyFirst ? List.of(change.withoutPlatform(), change) : List.of(change); case leading: return List.of(change); case simultaneous: return platformReadyFirst ? List.of(change) : List.of(change.withoutPlatform(), change); default: throw new IllegalStateException("Unknown upgrade rollout policy"); } } /** The test jobs that need to run prior to the given production deployment jobs. 
*/ public Map<JobId, List<Job>> testJobs(Map<JobId, List<Job>> jobs) { Map<JobId, List<Job>> testJobs = new LinkedHashMap<>(); jobs.forEach((job, versionsList) -> { for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) { if (job.type().isProduction() && job.type().isDeployment()) { declaredTest(job.application(), testType).ifPresent(testJob -> { for (Job productionJob : versionsList) if (allJobs.successOn(testType, productionJob.versions()) .instance(testJob.application().instance()) .asList().isEmpty()) testJobs.merge(testJob, List.of(new Job(testJob.type(), productionJob.versions(), jobSteps().get(testJob).readyAt(productionJob.change), productionJob.change)), DeploymentStatus::union); }); } } }); jobs.forEach((job, versionsList) -> { for (JobType testType : List.of(systemTest(job.type()), stagingTest(job.type()))) { for (Job productionJob : versionsList) if ( job.type().isProduction() && job.type().isDeployment() && allJobs.successOn(testType, productionJob.versions()).asList().isEmpty() && testJobs.keySet().stream() .noneMatch(test -> test.type().equals(testType) && test.type().zone().equals(testType.zone()) && testJobs.get(test).stream().anyMatch(testJob -> testJob.versions().equals(productionJob.versions())))) { JobId testJob = firstDeclaredOrElseImplicitTest(testType); testJobs.merge(testJob, List.of(new Job(testJob.type(), productionJob.versions(), jobSteps.get(testJob).readyAt(productionJob.change), productionJob.change)), DeploymentStatus::union); } } }); return Collections.unmodifiableMap(testJobs); } private CloudName findCloud(JobType job) { return zones.zones().all().get(job.zone()).map(ZoneApi::getCloudName).orElse(zones.systemZone().getCloudName()); } private JobId firstDeclaredOrElseImplicitTest(JobType testJob) { return application.deploymentSpec().instanceNames().stream() .map(name -> new JobId(application.id().instance(name), testJob)) .filter(jobSteps::containsKey) .min(comparing(id -> ! 
jobSteps.get(id).isDeclared())).orElseThrow(); } /** JobId of any declared test of the given type, for the given instance. */ private Optional<JobId> declaredTest(ApplicationId instanceId, JobType testJob) { JobId jobId = new JobId(instanceId, testJob); return jobSteps.containsKey(jobId) && jobSteps.get(jobId).isDeclared() ? Optional.of(jobId) : Optional.empty(); } /** A DAG of the dependencies between the primitive steps in the spec, with iteration order equal to declaration order. */ private Map<JobId, StepStatus> jobDependencies(DeploymentSpec spec, List<StepStatus> allSteps, Function<JobId, JobStatus> jobs) { if (DeploymentSpec.empty.equals(spec)) return Map.of(); Map<JobId, StepStatus> dependencies = new LinkedHashMap<>(); List<StepStatus> previous = List.of(); for (DeploymentSpec.Step step : spec.steps()) previous = fillStep(dependencies, allSteps, step, previous, null, jobs, instanceWithImplicitTest(test, spec), instanceWithImplicitTest(staging, spec)); return Collections.unmodifiableMap(dependencies); } private static InstanceName instanceWithImplicitTest(Environment environment, DeploymentSpec spec) { InstanceName first = null; for (DeploymentInstanceSpec step : spec.instances()) { if (step.concerns(environment)) return null; first = first != null ? first : step.name(); } return first; } /** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */ private List<StepStatus> fillStep(Map<JobId, StepStatus> dependencies, List<StepStatus> allSteps, DeploymentSpec.Step step, List<StepStatus> previous, InstanceName instance, Function<JobId, JobStatus> jobs, InstanceName implicitSystemTest, InstanceName implicitStagingTest) { if (step.steps().isEmpty() && ! (step instanceof DeploymentInstanceSpec)) { if (instance == null) return previous; if ( ! 
step.delay().isZero()) { StepStatus stepStatus = new DelayStatus((DeploymentSpec.Delay) step, previous, instance); allSteps.add(stepStatus); return List.of(stepStatus); } JobType jobType; JobId jobId; StepStatus stepStatus; if (step.concerns(test) || step.concerns(staging)) { jobType = step.concerns(test) ? systemTest(null) : stagingTest(null); jobId = new JobId(application.id().instance(instance), jobType); stepStatus = JobStepStatus.ofTestDeployment((DeclaredZone) step, List.of(), this, jobs.apply(jobId), true); previous = new ArrayList<>(previous); previous.add(stepStatus); } else if (step.isTest()) { jobType = JobType.test(((DeclaredTest) step).region()); jobId = new JobId(application.id().instance(instance), jobType); stepStatus = JobStepStatus.ofProductionTest((DeclaredTest) step, previous, this, jobs.apply(jobId)); previous = List.of(stepStatus); } else if (step.concerns(prod)) { jobType = JobType.prod(((DeclaredZone) step).region().get()); jobId = new JobId(application.id().instance(instance), jobType); stepStatus = JobStepStatus.ofProductionDeployment((DeclaredZone) step, previous, this, jobs.apply(jobId)); previous = List.of(stepStatus); } else return previous; allSteps.add(stepStatus); dependencies.put(jobId, stepStatus); return previous; } if (step instanceof DeploymentInstanceSpec) { DeploymentInstanceSpec spec = ((DeploymentInstanceSpec) step); StepStatus instanceStatus = new InstanceStatus(spec, previous, now, application.require(spec.name()), this); instance = spec.name(); allSteps.add(instanceStatus); previous = List.of(instanceStatus); if (instance.equals(implicitSystemTest)) { JobId job = new JobId(application.id().instance(instance), systemTest(null)); JobStepStatus testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(test), List.of(), this, jobs.apply(job), false); dependencies.put(job, testStatus); allSteps.add(testStatus); } if (instance.equals(implicitStagingTest)) { JobId job = new JobId(application.id().instance(instance), 
stagingTest(null)); JobStepStatus testStatus = JobStepStatus.ofTestDeployment(new DeclaredZone(staging), List.of(), this, jobs.apply(job), false); dependencies.put(job, testStatus); allSteps.add(testStatus); } } if (step.isOrdered()) { for (DeploymentSpec.Step nested : step.steps()) previous = fillStep(dependencies, allSteps, nested, previous, instance, jobs, implicitSystemTest, implicitStagingTest); return previous; } List<StepStatus> parallel = new ArrayList<>(); for (DeploymentSpec.Step nested : step.steps()) parallel.addAll(fillStep(dependencies, allSteps, nested, previous, instance, jobs, implicitSystemTest, implicitStagingTest)); return List.copyOf(parallel); } public enum StepType { /** An instance — completion marks a change as ready for the jobs contained in it. */ instance, /** A timed delay. */ delay, /** A system, staging or production test. */ test, /** A production deployment. */ deployment, } /** * Used to represent all steps — explicit and implicit — that may run in order to complete deployment of a change. * * Each node contains a step describing the node, * a list of steps which need to be complete before the step may start, * a list of jobs from which completion of the step is computed, and * optionally, an instance name used to identify a job type for the step, * * The completion criterion for each type of step is implemented in subclasses of this. */ public static abstract class StepStatus { private final StepType type; private final DeploymentSpec.Step step; private final List<StepStatus> dependencies; private final InstanceName instance; private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) { this.type = requireNonNull(type); this.step = requireNonNull(step); this.dependencies = List.copyOf(dependencies); this.instance = instance; } /** The type of step this is. */ public final StepType type() { return type; } /** The step defining this. 
*/ public final DeploymentSpec.Step step() { return step; } /** The list of steps that need to be complete before this may start. */ public final List<StepStatus> dependencies() { return dependencies; } /** The instance of this. */ public final InstanceName instance() { return instance; } /** The id of the job this corresponds to, if any. */ public Optional<JobId> job() { return Optional.empty(); } /** The time at which this is, or was, complete on the given change and / or versions. */ public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); } /** The time at which this is, or was, complete on the given change and / or versions. */ abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent); /** The time at which this step is ready to run the specified change and / or versions. */ public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); } /** The time at which this step is ready to run the specified change and / or versions. */ Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { return dependenciesCompletedAt(change, dependent) .map(ready -> Stream.of(blockedUntil(change), pausedUntil(), coolingDownUntil(change)) .flatMap(Optional::stream) .reduce(ready, maxBy(naturalOrder()))); } /** The time at which all dependencies completed on the given change and / or versions. */ Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) { Instant latest = Instant.EPOCH; for (StepStatus step : dependencies) { Optional<Instant> completedAt = step.completedAt(change, dependent); if (completedAt.isEmpty()) return Optional.empty(); latest = latest.isBefore(completedAt.get()) ? completedAt.get() : latest; } return Optional.of(latest); } /** The time until which this step is blocked by a change blocker. 
*/ public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); } /** The time until which this step is paused by user intervention. */ public Optional<Instant> pausedUntil() { return Optional.empty(); } /** The time until which this step is cooling down, due to consecutive failures. */ public Optional<Instant> coolingDownUntil(Change change) { return Optional.empty(); } /** Whether this step is declared in the deployment spec, or is an implicit step. */ public boolean isDeclared() { return true; } } private static class DelayStatus extends StepStatus { private DelayStatus(DeploymentSpec.Delay step, List<StepStatus> dependencies, InstanceName instance) { super(StepType.delay, step, dependencies, instance); } @Override Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return readyAt(change, dependent).map(completion -> completion.plus(step().delay())); } } private static class InstanceStatus extends StepStatus { private final DeploymentInstanceSpec spec; private final Instant now; private final Instance instance; private final DeploymentStatus status; private InstanceStatus(DeploymentInstanceSpec spec, List<StepStatus> dependencies, Instant now, Instance instance, DeploymentStatus status) { super(StepType.instance, spec, dependencies, spec.name()); this.spec = spec; this.now = now; this.instance = instance; this.status = status; } /** The time at which this step is ready to run the specified change and / or versions. */ @Override public Optional<Instant> readyAt(Change change) { return status.jobSteps.keySet().stream() .filter(job -> job.type().isProduction() && job.application().instance().equals(instance.name())) .map(job -> super.readyAt(change, Optional.of(job))) .reduce((o, n) -> o.isEmpty() || n.isEmpty() ? Optional.empty() : n.get().isBefore(o.get()) ? 
n : o) .orElseGet(() -> super.readyAt(change, Optional.empty())); } /** * Time of completion of its dependencies, if all parts of the given change are contained in the change * for this instance, or if no more jobs should run for this instance for the given change. */ @Override Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { return ( (change.platform().isEmpty() || change.platform().equals(instance.change().platform())) && (change.revision().isEmpty() || change.revision().equals(instance.change().revision())) || step().steps().stream().noneMatch(step -> step.concerns(prod))) ? dependenciesCompletedAt(change, dependent).or(() -> Optional.of(Instant.EPOCH).filter(__ -> change.hasTargets())) : Optional.empty(); } @Override public Optional<Instant> blockedUntil(Change change) { for (Instant current = now; now.plus(Duration.ofDays(7)).isAfter(current); ) { boolean blocked = false; for (DeploymentSpec.ChangeBlocker blocker : spec.changeBlocker()) { while ( blocker.window().includes(current) && now.plus(Duration.ofDays(7)).isAfter(current) && ( change.platform().isPresent() && blocker.blocksVersions() || change.revision().isPresent() && blocker.blocksRevisions())) { blocked = true; current = current.plus(Duration.ofHours(1)).truncatedTo(ChronoUnit.HOURS); } } if ( ! blocked) return current == now ? 
Optional.empty() : Optional.of(current); } return Optional.of(now.plusSeconds(1 << 30)); } } private static abstract class JobStepStatus extends StepStatus { private final JobStatus job; private final DeploymentStatus status; private JobStepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, JobStatus job, DeploymentStatus status) { super(type, step, dependencies, job.id().application().instance()); this.job = requireNonNull(job); this.status = requireNonNull(status); } @Override public Optional<JobId> job() { return Optional.of(job.id()); } @Override public Optional<Instant> pausedUntil() { return status.application().require(job.id().application().instance()).jobPause(job.id().type()); } @Override public Optional<Instant> coolingDownUntil(Change change) { if (job.lastTriggered().isEmpty()) return Optional.empty(); if (job.lastCompleted().isEmpty()) return Optional.empty(); if (job.firstFailing().isEmpty() || ! job.firstFailing().get().hasEnded()) return Optional.empty(); Versions lastVersions = job.lastCompleted().get().versions(); if (change.platform().isPresent() && ! change.platform().get().equals(lastVersions.targetPlatform())) return Optional.empty(); if (change.revision().isPresent() && ! change.revision().get().equals(lastVersions.targetRevision())) return Optional.empty(); if (job.id().type().environment().isTest() && job.isNodeAllocationFailure()) return Optional.empty(); Instant firstFailing = job.firstFailing().get().end().get(); Instant lastCompleted = job.lastCompleted().get().end().get(); return firstFailing.equals(lastCompleted) ? 
Optional.of(lastCompleted) : Optional.of(lastCompleted.plus(Duration.ofMinutes(10)) .plus(Duration.between(firstFailing, lastCompleted) .dividedBy(2))) .filter(status.now::isBefore); } private static JobStepStatus ofProductionDeployment(DeclaredZone step, List<StepStatus> dependencies, DeploymentStatus status, JobStatus job) { ZoneId zone = ZoneId.from(step.environment(), step.region().get()); Optional<Deployment> existingDeployment = Optional.ofNullable(status.application().require(job.id().application().instance()) .deployments().get(zone)); return new JobStepStatus(StepType.deployment, step, dependencies, job, status) { @Override public Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { Optional<Instant> readyAt = super.readyAt(change, dependent); Optional<Instant> testedAt = status.verifiedAt(job.id(), Versions.from(change, status.application, existingDeployment, status.fallbackPlatform(change, job.id()))); if (readyAt.isEmpty() || testedAt.isEmpty()) return Optional.empty(); return readyAt.get().isAfter(testedAt.get()) ? readyAt : testedAt; } /** Complete if deployment is on pinned version, and last successful deployment, or if given versions is strictly a downgrade, and this isn't forced by a pin. */ @Override Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { if ( change.isPinned() && change.platform().isPresent() && ! existingDeployment.map(Deployment::version).equals(change.platform())) return Optional.empty(); if ( change.revision().isPresent() && ! existingDeployment.map(Deployment::revision).equals(change.revision()) && dependent.equals(job())) return Optional.empty(); Change fullChange = status.application().require(job.id().application().instance()).change(); if (existingDeployment.map(deployment -> ! 
(change.upgrades(deployment.version()) || change.upgrades(deployment.revision())) && (fullChange.downgrades(deployment.version()) || fullChange.downgrades(deployment.revision()))) .orElse(false)) return job.lastCompleted().flatMap(Run::end); Optional<Instant> end = Optional.empty(); for (Run run : job.runs().descendingMap().values()) { if (run.versions().targetsMatch(change)) { if (run.hasSucceeded()) end = run.end(); } else if (dependent.equals(job())) break; } return end; } }; } private static JobStepStatus ofProductionTest(DeclaredTest step, List<StepStatus> dependencies, DeploymentStatus status, JobStatus job) { JobId prodId = new JobId(job.id().application(), JobType.deploymentTo(job.id().type().zone())); return new JobStepStatus(StepType.test, step, dependencies, job, status) { @Override Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { Optional<Instant> readyAt = super.readyAt(change, dependent); Optional<Instant> deployedAt = status.jobSteps().get(prodId).completedAt(change, Optional.of(prodId)); if (readyAt.isEmpty() || deployedAt.isEmpty()) return Optional.empty(); return readyAt.get().isAfter(deployedAt.get()) ? readyAt : deployedAt; } @Override Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { Optional<Instant> deployedAt = status.jobSteps().get(prodId).completedAt(change, Optional.of(prodId)); return (dependent.equals(job()) ? job.lastTriggered().filter(run -> deployedAt.map(at -> ! 
run.start().isBefore(at)).orElse(false)).stream() : job.runs().values().stream()) .filter(Run::hasSucceeded) .filter(run -> run.versions().targetsMatch(change)) .flatMap(run -> run.end().stream()).findFirst(); } }; } private static JobStepStatus ofTestDeployment(DeclaredZone step, List<StepStatus> dependencies, DeploymentStatus status, JobStatus job, boolean declared) { return new JobStepStatus(StepType.test, step, dependencies, job, status) { @Override Optional<Instant> completedAt(Change change, Optional<JobId> dependent) { Optional<ZoneId> requiredTestZone = dependent.map(dep -> job.id().type().isSystemTest() ? status.systemTest(dep.type()).zone() : status.stagingTest(dep.type()).zone()); return RunList.from(job) .matching(run -> dependent.flatMap(status::deploymentFor) .map(deployment -> run.versions().targetsMatch(Versions.from(change, status.application, Optional.of(deployment), status.fallbackPlatform(change, dependent.get())))) .orElseGet(() -> (change.platform().isEmpty() || change.platform().get().equals(run.versions().targetPlatform())) && (change.revision().isEmpty() || change.revision().get().equals(run.versions().targetRevision())))) .matching(Run::hasSucceeded) .matching(run -> requiredTestZone.isEmpty() || requiredTestZone.get().equals(run.id().type().zone())) .asList().stream() .map(run -> run.end().get()) .max(naturalOrder()); } @Override public boolean isDeclared() { return declared; } }; } } public static class Job { private final JobType type; private final Versions versions; private final Optional<Instant> readyAt; private final Change change; public Job(JobType type, Versions versions, Optional<Instant> readyAt, Change change) { this.type = type; this.versions = type.isSystemTest() ? 
versions.withoutSources() : versions; this.readyAt = readyAt; this.change = change; } public JobType type() { return type; } public Versions versions() { return versions; } public Optional<Instant> readyAt() { return readyAt; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Job job = (Job) o; return type.zone().equals(job.type.zone()) && versions.equals(job.versions) && readyAt.equals(job.readyAt) && change.equals(job.change); } @Override public int hashCode() { return Objects.hash(type.zone(), versions, readyAt, change); } @Override public String toString() { return change + " with versions " + versions + ", ready at " + readyAt; } } }
Notifications are keyed on (source, type) (see https://github.com/vespa-engine/vespa/blob/ceb337dea57c909ac83bc168e1b79872eae6dbfe/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java#L62), so any application with an explicit major version will either override the `applicationPackage` warnings from config-server prepare or clear them altogether. Should this use the `submission` type instead, so it does not collide with those warnings?
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
controller.notificationsDb().setNotification(NotificationSource.from(id),
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
// JobController: orchestrates hosted-Vespa deployment job runs — run persistence (CuratorDb), buffered
// run logs (BufferedLogStore), the tester-cloud client, and the runner callback that advances active runs.
// NOTE(review): this copy has lost its original line breaks; code below is kept byte-identical, with
// orientation comments inserted only at the (syntactically inert) line boundaries.
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
// updateVespaLog (cont.): tester-log window starts at the last recorded timestamp, or just before tester install;
// real and tester entries are merged, time-sorted, and the newest timestamp is stored for continuation.
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
// storeVespaLogs (cont.): non-production runs archive real logs to the run data store; test jobs also archive tester logs.
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
// runs(id, type) (cont.): historic runs plus the (possibly newer) last run, ordered by run number.
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
// active(TenantAndApplicationId) (cont.): keeps only runs which have not yet ended.
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
// NOTE(review): the INFO message literal above ('List.of(" …') appears truncated in this copy — verify against upstream.
// finish (cont.): prunes history beyond historyLength/maxHistoryAge while retaining the sole remaining success.
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
// submit (cont.): -1 means no build project; the stored revision is validated, persisted, and may trigger deployment.
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> 
// withPrunedPackages (cont.): keeps packages back to the oldest deployed (or change-targeted) revision, dropping older ones.
wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! 
// deploy (cont.): refuses zones unknown to this system; creates the instance on first deploy, aborts any ongoing run.
controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
// deploy (cont.): dev deployments run with a development (or dry-run) job profile, then the runner advances the new run.
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
// findTargetPlatform (cont.): intersects compile-compatible versions with the pinned major, newest first.
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
// collectGarbage (cont.): deactivates testers and deletes run data for applications no longer known/built here.
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> 
wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! 
controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
This is a property of the application package — which I think holds for all of the submission warnings — so was the submission category just created as a workaround for this problem? It doesn't seem like we have this entirely right: why keep warnings associated with the application package around if they were not generated by its current version? And given that, why should code emitting individual warnings clear a whole _category_ of them? I think we should: - combine the applicationPackage and submission categories into one, unless we really have warnings tied to the parameters of the submission; - always clear this category when a new submission is received; and - remove any code that clears warnings from individual validations.
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
controller.notificationsDb().setNotification(NotificationSource.from(id),
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
/**
 * JobController coordinates the deployment-job runs of all application instances: it
 * starts, finishes, aborts and prunes runs; buffers and stores their logs (Vespa logs,
 * tester logs, test reports); accepts submitted application packages and dev deployments;
 * and exposes run history and deployment status. All run mutations go through the
 * {@code locked(...)} helpers at the bottom, which serialize access per application/job
 * via curator locks.
 *
 * NOTE(review): this block has been collapsed onto very long physical lines, apparently
 * by an extraction tool, and a few string literals look truncated where "//" inside a
 * literal was mistaken for a comment (e.g. the {@code "https:"} message in
 * validateMajorVersion and the {@code List.of(" ...} argument in finish()). Tokens are
 * preserved verbatim below — confirm those literals against the upstream source.
 */
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> 
wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! 
controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
// Pick the newest platform that is both on the deployment.xml-pinned major (if any) and compile-compatible.
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> 
wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! 
controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
The main issue is that the different notifications are generated in different places at different times, and because we also want to clear a notification automatically once its underlying issue is fixed, each notification type must have a single source of truth. The `submission` type covers application-package warnings generated in the controller, while `applicationPackage` warnings are generated in the config server (and stored in the controller's ZooKeeper after a deploy, which happens at a different time and is driven by a different maintainer). Both types, however, are emitted as `applicationPackage` to the console: https://github.com/vespa-engine/vespa/blob/5f19d58210dee17bf2c8161b6fbea66163cd79d6/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java#L881
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
controller.notificationsDb().setNotification(NotificationSource.from(id),
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> 
wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! 
controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> 
wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! 
controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
Note the distinction: "applicationPackage" notifications are warnings issued by the config model and are already managed centrally, whereas "submission" notifications are application-package warnings issued by the controller itself, which are not yet managed centrally.
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
controller.notificationsDb().setNotification(NotificationSource.from(id),
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> 
wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! 
controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> 
wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! 
controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
Clearing those owned by the controller on submission: https://github.com/vespa-engine/vespa/pull/23975
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
controller.notificationsDb().setNotification(NotificationSource.from(id),
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> 
wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! 
controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
/**
 * Controls deployment jobs: starting, logging, finishing and pruning job runs, and storing
 * submitted application packages under generated revision ids.
 *
 * Run state and history live in {@code CuratorDb}; log entries are buffered through
 * {@code BufferedLogStore}. Most mutations go through one of the {@code locked(...)} helpers
 * below, which take the per-application/job ZooKeeper lock before reading and writing state.
 *
 * NOTE(review): several string literals in this copy of the source are truncated by text
 * extraction (unterminated quotes); they are preserved verbatim and flagged inline — restore
 * them from the upstream source before compiling.
 */
class JobController {

    // Runs older than this are pruned from history when a job finishes (see finish()).
    public static final Duration maxHistoryAge = Duration.ofDays(60);

    private static final Logger log = Logger.getLogger(JobController.class.getName());

    private final int historyLength;
    private final Controller controller;
    private final CuratorDb curator;
    private final BufferedLogStore logs;
    private final TesterCloud cloud;
    private final JobMetrics metric;

    // Hook used to advance runs (e.g. after deploy()); a no-op until setRunner is called.
    private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });

    public JobController(Controller controller) {
        // CD systems keep a longer run history than other systems.
        this.historyLength = controller.system().isCd() ? 256 : 64;
        this.controller = controller;
        this.curator = controller.curator();
        this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
        this.cloud = controller.serviceRegistry().testerCloud();
        this.metric = new JobMetrics(controller.metric(), controller::system);
    }

    public TesterCloud cloud() { return cloud; }

    public int historyLength() { return historyLength; }

    public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }

    /** Rewrite all job data with the newest format. */
    public void updateStorage() {
        for (ApplicationId id : instances())
            for (JobType type : jobs(id)) {
                // Read-then-write under lock re-serializes each last run in the current format.
                locked(id, type, runs -> {
                    curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
                });
            }
    }

    /** Returns all entries currently logged for the given run. */
    public Optional<RunLog> details(RunId id) {
        return details(id, -1);
    }

    /** Returns the logged entries for the given run, which are after the given id threshold. */
    public Optional<RunLog> details(RunId id, long after) {
        try (Mutex __ = curator.lock(id.application(), id.type())) {
            Run run = runs(id.application(), id.type()).get(id);
            if (run == null)
                return Optional.empty();

            // Active runs read from the buffered (live) log; finished runs from the stored log.
            return active(id).isPresent()
                    ? Optional.of(logs.readActive(id.application(), id.type(), after))
                    : logs.readFinished(id, after);
        }
    }

    /** Stores the given log entries for the given run and step. */
    public void log(RunId id, Step step, List<LogEntry> entries) {
        locked(id, __ -> {
            logs.append(id.application(), id.type(), step, entries, true);
            return __;
        });
    }

    /** Stores the given log messages for the given run and step. */
    public void log(RunId id, Step step, Level level, List<String> messages) {
        log(id, step, messages.stream()
                              .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
                              .collect(toList()));
    }

    /** Stores the given log message for the given run and step. */
    public void log(RunId id, Step step, Level level, String message) {
        log(id, step, level, Collections.singletonList(message));
    }

    /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
    public void updateVespaLog(RunId id) {
        locked(id, run -> {
            if ( ! run.hasStep(copyVespaLogs))
                return run;

            storeVespaLogs(id);

            ZoneId zone = id.type().zone();
            Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application())
                                                                            .deployments().get(zone));
            // Only copy logs for a deployment made by this run.
            if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
                return run;

            List<LogEntry> log;
            Instant deployedAt;
            Instant from;
            if ( ! run.id().type().isProduction()) {
                deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow();
                // Continue from the last recorded timestamp, or slightly before deployment start on the first fetch.
                from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
                log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
                                                       .getLogs(new DeploymentId(id.application(), zone),
                                                                Map.of("from", Long.toString(from.toEpochMilli()))),
                                             from);
            }
            else log = List.of();

            if (id.type().isTest()) {
                deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow();
                from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
                List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
                                                                            .getLogs(new DeploymentId(id.tester().id(), zone),
                                                                                     Map.of("from", Long.toString(from.toEpochMilli()))),
                                                                  from);

                // Drop the newest couple of seconds, so late-arriving entries are not missed on the next fetch.
                Instant justNow = controller.clock().instant().minusSeconds(2);
                log = Stream.concat(log.stream(), testerLog.stream())
                            .filter(entry -> entry.at().isBefore(justNow))
                            .sorted(comparing(LogEntry::at))
                            .collect(toUnmodifiableList());
            }
            if (log.isEmpty())
                return run;

            logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false);
            return run.with(log.get(log.size() - 1).at());
        });
    }

    public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) {
        Run run = run(id);
        // Prefer the stored copy once the copyVespaLogs step has succeeded; otherwise fetch live.
        return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false)
                ? controller.serviceRegistry().runDataStore().getLogs(id, tester)
                : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream());
    }

    public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) {
        return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)))
                .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10));
    }

    public void storeVespaLogs(RunId id) {
        Run run = run(id);
        if ( ! id.type().isProduction()) {
            getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> {
                try (logs) {
                    controller.serviceRegistry().runDataStore().putLogs(id, false, logs);
                }
                catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
        }
        if (id.type().isTest()) {
            getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> {
                try (logs) {
                    controller.serviceRegistry().runDataStore().putLogs(id, true, logs);
                }
                catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
        }
    }

    private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) {
        return deploymentCompletedAt(run, tester).map(at ->
                controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(),
                                                                                     run.id().type().zone()),
                                                                    Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())),
                                                                           "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli()))));
    }

    /** Fetches any new test log entries, and records the id of the last of these, for continuation. */
    public void updateTestLog(RunId id) {
        locked(id, run -> {
            Optional<Step> step = Stream.of(endStagingSetup, endTests)
                                        .filter(run.readySteps()::contains)
                                        .findAny();
            if (step.isEmpty())
                return run;

            List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()),
                                                  run.lastTestLogEntry());
            if (entries.isEmpty())
                return run;

            logs.append(id.application(), id.type(), step.get(), entries, false);
            return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
        });
    }

    public void updateTestReport(RunId id) {
        locked(id, run -> {
            Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone()));
            if (report.isEmpty()) {
                return run;
            }
            logs.writeTestReport(id, report.get());
            return run;
        });
    }

    public Optional<String> getTestReports(RunId id) {
        return logs.readTestReports(id);
    }

    /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
    public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
        locked(id, run -> run.with(testerCertificate));
    }

    /** Returns a list of all instances of applications which have registered. */
    public List<ApplicationId> instances() {
        return controller.applications().readable().stream()
                         .flatMap(application -> application.instances().values().stream())
                         .map(Instance::id)
                         .collect(toUnmodifiableList());
    }

    /** Returns all job types which have been run for the given application. */
    private List<JobType> jobs(ApplicationId id) {
        return JobType.allIn(controller.zoneRegistry()).stream()
                      .filter(type -> last(id, type).isPresent())
                      .collect(toUnmodifiableList());
    }

    /** Returns an immutable map of all known runs for the given application and job type. */
    public NavigableMap<RunId, Run> runs(JobId id) {
        return runs(id.application(), id.type());
    }

    /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */
    public List<Instant> jobStarts(JobId id) {
        return runs(id).descendingMap().values().stream()
                       .filter(run -> ! run.isRedeployment())
                       .map(Run::start)
                       .collect(toUnmodifiableList());
    }

    /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */
    public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) {
        return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream()
                                                                                        .findFirst()
                                                                                        .orElseGet(deployment::at);
    }

    /** Returns an immutable map of all known runs for the given application and job type. */
    public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) {
        ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number));
        Optional<Run> last = last(id, type);
        // The last run may be newer than its historic copy, so it overrides any entry with the same id.
        curator.readHistoricRuns(id, type).forEach((runId, run) -> {
            if (last.isEmpty() || ! runId.equals(last.get().id()))
                runs.put(runId, run);
        });
        last.ifPresent(run -> runs.put(run.id(), run));
        return runs.build();
    }

    /** Returns the run with the given id, or throws if no such run exists. */
    public Run run(RunId id) {
        return runs(id.application(), id.type()).values().stream()
                                                .filter(run -> run.id().equals(id))
                                                .findAny()
                                                .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists"));
    }

    /** Returns the last run of the given type, for the given application, if one has been run. */
    public Optional<Run> last(JobId job) {
        return curator.readLastRun(job.application(), job.type());
    }

    /** Returns the last run of the given type, for the given application, if one has been run. */
    public Optional<Run> last(ApplicationId id, JobType type) {
        return curator.readLastRun(id, type);
    }

    /** Returns the last completed of the given job. */
    public Optional<Run> lastCompleted(JobId id) {
        return JobStatus.lastCompleted(runs(id));
    }

    /** Returns the first failing of the given job. */
    public Optional<Run> firstFailing(JobId id) {
        return JobStatus.firstFailing(runs(id));
    }

    /** Returns the last success of the given job. */
    public Optional<Run> lastSuccess(JobId id) {
        return JobStatus.lastSuccess(runs(id));
    }

    /** Returns the run with the given id, provided it is still active. */
    public Optional<Run> active(RunId id) {
        return last(id.application(), id.type())
                .filter(run -> ! run.hasEnded())
                .filter(run -> run.id().equals(id));
    }

    /** Returns a list of all active runs. */
    public List<Run> active() {
        return controller.applications().idList().stream()
                         .flatMap(id -> active(id).stream())
                         .toList();
    }

    /** Returns a list of all active runs for the given application. */
    public List<Run> active(TenantAndApplicationId id) {
        return controller.applications().requireApplication(id).instances().keySet().stream()
                         .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream()
                                                 .map(type -> last(id.instance(name), type))
                                                 .flatMap(Optional::stream)
                                                 .filter(run -> ! run.hasEnded()))
                         .toList();
    }

    /** Returns a list of all active runs for the given instance. */
    public List<Run> active(ApplicationId id) {
        return JobType.allIn(controller.zoneRegistry()).stream()
                      .map(type -> last(id, type))
                      .flatMap(Optional::stream)
                      .filter(run -> !run.hasEnded())
                      .toList();
    }

    /** Returns the job status of the given job, possibly empty. */
    public JobStatus jobStatus(JobId id) {
        return new JobStatus(id, runs(id));
    }

    /** Returns the deployment status of the given application. */
    public DeploymentStatus deploymentStatus(Application application) {
        VersionStatus versionStatus = controller.readVersionStatus();
        return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus));
    }

    private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) {
        return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion,
                                    instance -> controller.applications().versionCompatibility(application.id().instance(instance)),
                                    controller.clock().instant());
    }

    /** Adds deployment status to each of the given applications. */
    public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) {
        Version systemVersion = controller.systemVersion(versionStatus);
        return DeploymentStatusList.from(applications.asList().stream()
                                                     .map(application -> deploymentStatus(application, versionStatus, systemVersion))
                                                     .toList());
    }

    /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */
    public DeploymentStatusList deploymentStatuses(ApplicationList applications) {
        VersionStatus versionStatus = controller.readVersionStatus();
        return deploymentStatuses(applications, versionStatus);
    }

    /** Changes the status of the given step, for the given run, provided it is still active. */
    public void update(RunId id, RunStatus status, LockedStep step) {
        locked(id, run -> run.with(status, step));
    }

    /**
     * Changes the status of the given run to inactive, and stores it as a historic run.
     * Throws TimeoutException if some step in this job is still being run.
     */
    public void finish(RunId id) throws TimeoutException {
        // All step locks are taken (and later released) so no step can still be running when the run is finished.
        Deque<Mutex> locks = new ArrayDeque<>();
        try {
            Run unlockedRun = run(id);
            locks.push(curator.lock(id.application(), id.type(), report));
            for (Step step : report.allPrerequisites(unlockedRun.steps().keySet()))
                locks.push(curator.lock(id.application(), id.type(), step));

            locked(id, run -> {
                // NOTE(review): the log message literal below is truncated in this source (unterminated quote
                // after List.of(") — the remainder of the original message is lost; restore from upstream.
                if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); }
                if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals)))
                    return run;

                Run finishedRun = run.finished(controller.clock().instant());
                locked(id.application(), id.type(), runs -> {
                    runs.put(run.id(), finishedRun);
                    long last = id.number();
                    long successes = runs.values().stream().filter(Run::hasSucceeded).count();
                    // Prune runs that fall outside the history length, or that are older than maxHistoryAge.
                    var oldEntries = runs.entrySet().iterator();
                    for (var old = oldEntries.next();
                         old.getKey().number() <= last - historyLength
                         || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
                         old = oldEntries.next()) {
                        // Keep the only successful run, unless it has also aged out.
                        if (successes == 1 && old.getValue().hasSucceeded()
                            && ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) {
                            oldEntries.next();
                            continue;
                        }
                        logs.delete(old.getKey());
                        oldEntries.remove();
                    }
                });
                logs.flush(id);
                metric.jobFinished(run.id().job(), finishedRun.status());
                pruneRevisions(unlockedRun);
                return finishedRun;
            });
        }
        finally {
            // Release all acquired locks even if finishing fails; a stuck lock would require a controller restart.
            for (Mutex lock : locks) {
                try {
                    lock.close();
                }
                catch (Throwable t) {
                    log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " +
                                     "have been released in ZooKeeper, and if not this controller " +
                                     "must be restarted to release the lock", t);
                }
            }
        }
    }

    /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
    public void abort(RunId id, String reason) {
        locked(id, run -> {
            run.stepStatuses().entrySet().stream()
               .filter(entry -> entry.getValue() == unfinished)
               .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason));
            return run.aborted();
        });
    }

    /** Accepts and stores a new application package and test jar pair under a generated application version key. */
    public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) {
        ApplicationController applications = controller.applications();
        AtomicReference<ApplicationVersion> version = new AtomicReference<>();
        applications.lockApplicationOrThrow(id, application -> {
            Optional<ApplicationVersion> previousVersion = application.get().revisions().last();
            Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong()))
                                                                          .map(ApplicationPackage::new);
            long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L);
            version.set(submission.toApplicationVersion(1 + previousBuild));

            // Diff against the previous package when present, otherwise against an empty package.
            byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage()))
                                         .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage()));
            applications.applicationStore().put(id.tenant(), id.application(), version.get().id(),
                                                submission.applicationPackage().zippedContent(), submission.testPackage(), diff);
            applications.applicationStore().putMeta(id.tenant(), id.application(),
                                                    controller.clock().instant(), submission.applicationPackage().metaDataZip());

            // A projectId of -1 means no project id is set.
            application = application.withProjectId(projectId == -1 ? OptionalLong.empty() : OptionalLong.of(projectId));
            application = application.withRevisions(revisions -> revisions.with(version.get()));
            application = withPrunedPackages(application, version.get().id());

            validate(id, submission);

            applications.storeWithUpdatedConfig(application, submission.applicationPackage());
            if (application.get().projectId().isPresent())
                applications.deploymentTrigger().triggerNewRevision(id);
        });
        return version.get();
    }

    private void validate(TenantAndApplicationId id, Submission submission) {
        validateTests(id, submission);
        validateParentVersion(id, submission);
        validateMajorVersion(id, submission);
    }

    private void validateTests(TenantAndApplicationId id, Submission submission) {
        TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage());
        if (testSummary.problems().isEmpty())
            controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage);
        else
            controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage,
                                                         Notification.Level.warning, testSummary.problems());
    }

    private void validateParentVersion(TenantAndApplicationId id, Submission submission) {
        submission.applicationPackage().parentVersion().ifPresent(parent -> {
            if (parent.getMajor() < controller.readSystemVersion().getMajor())
                controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning,
                                                             "Parent version used to compile the application is on a " +
                                                             "lower major version than the current Vespa Cloud version");
            else
                controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission);
        });
    }

    /** Prunes stored packages for revisions older than the oldest one still deployed or targeted by a change. */
    private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){
        TenantAndApplicationId id = application.get().id();
        Application wrapped = application.get();
        RevisionId oldestDeployed = application.get().oldestDeployedRevision()
                                               .or(() -> wrapped.instances().values().stream()
                                                                .flatMap(instance -> instance.change().revision().stream())
                                                                .min(naturalOrder()))
                                               .orElse(latest);
        controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed);
        for (ApplicationVersion version : application.get().revisions().withPackage())
            if (version.id().compareTo(oldestDeployed) < 0)
                application = application.withRevisions(revisions -> revisions.with(version.withoutPackage()));
        return application;
    }

    /** Forget revisions no longer present in any relevant job history. */
    private void pruneRevisions(Run run) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application());
        boolean isProduction = run.versions().targetRevision().isProduction();
        // Production revisions are scoped to all jobs of the application; dev revisions to this job only.
        (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream()
                      : Stream.of(jobStatus(run.id().job())))
                .flatMap(jobs -> jobs.runs().values().stream())
                .map(r -> r.versions().targetRevision())
                .filter(id -> id.isProduction() == isProduction)
                .min(naturalOrder())
                .ifPresent(oldestRevision -> {
                    controller.applications().lockApplicationOrThrow(applicationId, application -> {
                        if (isProduction) {
                            controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number());
                            controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision)));
                        }
                        else {
                            controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number());
                            controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job())));
                        }
                    });
                });
    }

    /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
    public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) {
        start(id, type, versions, isRedeployment, JobProfile.of(type), reason);
    }

    /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
    public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) {
        ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision());
        if (revision.compileVersion()
                    .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version))
                    .orElse(false))
            throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" +
                                               versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")");

        locked(id, type, __ -> {
            Optional<Run> last = last(id, type);
            if (last.flatMap(run -> active(run.id())).isPresent())
                throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!");

            RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
            curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason));
            metric.jobStarted(newId.job());
        });
    }

    /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
    public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
        deploy(id, type, platform, applicationPackage, false);
    }

    /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/
    public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) {
        if ( ! controller.zoneRegistry().hasZone(type.zone()))
            throw new IllegalArgumentException(type.zone() + " is not present in this system");

        // Implicitly create the instance if it does not already exist.
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            if ( ! application.get().instances().containsKey(id.instance()))
                application = controller.applications().withNewInstance(application, id);
            controller.applications().store(application);
        });

        DeploymentId deploymentId = new DeploymentId(id, type.zone());
        Optional<Run> lastRun = last(id, type);
        lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2)));

        long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L);
        RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type));
        ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion());

        byte[] diff = getDiff(applicationPackage, deploymentId, lastRun);

        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff);
            Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance())));
            controller.applications().store(application.withRevisions(revisions -> revisions.with(version)));
            start(id,
                  type,
                  new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())),
                  false,
                  dryRun ? JobProfile.developmentDryRun : JobProfile.development,
                  Optional.empty());
        });

        // Kick off the run immediately.
        locked(id, type, __ -> {
            runner.get().accept(last(id, type).get());
        });
    }

    /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */
    private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) {
        return lastRun.map(run -> run.versions().targetRevision())
                      .map(prevVersion -> {
                          ApplicationPackage previous;
                          try {
                              previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion));
                          }
                          catch (IllegalArgumentException e) {
                              return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage);
                          }
                          return ApplicationPackageDiff.diff(previous, applicationPackage);
                      })
                      .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage));
    }

    /** Picks the newest deployable platform that is compatible with the package's compile version and pinned major, if any. */
    private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) {
        List<Version> versions = controller.readVersionStatus().deployableVersions().stream()
                                           .map(VespaVersion::versionNumber)
                                           .collect(toList());
        // The platform currently deployed in this zone is also a candidate.
        instance.map(Instance::deployments)
                .map(deployments -> deployments.get(id.zoneId()))
                .map(Deployment::version)
                .ifPresent(versions::add);
        if (versions.isEmpty())
            throw new IllegalStateException("no deployable platform version found in the system");

        VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId());
        List<Version> compatibleVersions = new ArrayList<>();
        for (Version target : reversed(versions))
            if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get()))
                compatibleVersions.add(target);
        if (compatibleVersions.isEmpty())
            throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get());

        Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion();
        List<Version> versionOnRightMajor = new ArrayList<>();
        for (Version target : reversed(versions))
            if (major.isEmpty() || major.get() == target.getMajor())
                versionOnRightMajor.add(target);
        if (versionOnRightMajor.isEmpty())
            throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml");

        for (Version target : compatibleVersions)
            if (versionOnRightMajor.contains(target))
                return target;
        throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " +
                                           "are compatible with compile version " + applicationPackage.compileVersion().get());
    }

    /** Aborts a run and waits for it complete. */
    private void abortAndWait(RunId id, Duration timeout) {
        abort(id, "replaced by new deployment");
        runner.get().accept(last(id.application(), id.type()).get());

        // Poll until the run has ended, or the deadline passes.
        Instant doom = controller.clock().instant().plus(timeout);
        Duration sleep = Duration.ofMillis(100);
        while ( ! last(id.application(), id.type()).get().hasEnded()) {
            if (controller.clock().instant().plus(sleep).isAfter(doom))
                throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish");

            try {
                Thread.sleep(sleep.toMillis());
            }
            catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        }
    }

    /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
    public void collectGarbage() {
        Set<ApplicationId> applicationsToBuild = new HashSet<>(instances());
        curator.applicationsWithJobs().stream()
               .filter(id -> ! applicationsToBuild.contains(id))
               .forEach(id -> {
                   try {
                       TesterId tester = TesterId.of(id);
                       for (JobType type : jobs(id))
                           locked(id, type, deactivateTester, __ -> {
                               try (Mutex ___ = curator.lock(id, type)) {
                                   // Deactivation is best effort; run data is deleted regardless.
                                   try {
                                       deactivateTester(tester, type);
                                   }
                                   catch (Exception e) {
                                   }
                                   curator.deleteRunData(id, type);
                               }
                           });
                       logs.delete(id);
                       curator.deleteRunData(id);
                   }
                   catch (Exception e) {
                       log.log(WARNING, "failed cleaning up after deleted application", e);
                   }
               });
    }

    public void deactivateTester(TesterId id, JobType type) {
        controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone()));
    }

    /** Locks all runs and modifies the list of historic runs for the given application and job type. */
    private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
        try (Mutex __ = curator.lock(id, type)) {
            SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type));
            modifications.accept(runs);
            curator.writeHistoricRuns(id, type, runs.values());
        }
    }

    /** Locks and modifies the run with the given id, provided it is still active. */
    public void locked(RunId id, UnaryOperator<Run> modifications) {
        try (Mutex __ = curator.lock(id.application(), id.type())) {
            active(id).ifPresent(run -> {
                Run modified = modifications.apply(run);
                if (modified != null)
                    curator.writeLastRun(modified);
            });
        }
    }

    /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
    public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
        try (Mutex lock = curator.lock(id, type, step)) {
            // Acquiring and releasing each prerequisite's lock proves no prerequisite step is running.
            for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet()))
                try (Mutex __ = curator.lock(id, type, prerequisite)) { ; }

            action.accept(new LockedStep(lock, step));
        }
    }

}
Thanks, fixed.
/**
 * Sets a warning notification when deployment.xml pins an explicit major version below 8,
 * and removes any such notification otherwise.
 *
 * NOTE(review): the warning string below is truncated in this source — the URL following
 * "https:" (and the literal's closing quote) was lost in extraction; restore from upstream.
 */
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) {
    submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> {
        if (explicitMajor < 8)
            controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning,
                                                         "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
"Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" +
/**
 * Sets a warning notification when deployment.xml pins an explicit major version below 8,
 * and removes any such notification otherwise.
 *
 * NOTE(review): this line is a verbatim duplicate of the same method earlier in the file,
 * and its warning string is likewise truncated after "https:" — restore from upstream.
 */
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) {
    submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> {
        if (explicitMajor < 8)
            controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, Notification.Level.warning,
                                                         "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); }
/**
 * Coordinates deployment job runs: keeps run history and the last run of each job in CuratorDb,
 * buffers job logs in a {@link BufferedLogStore}, talks to the tester via {@link TesterCloud},
 * and reports job lifecycle events through {@link JobMetrics}.
 * <p>
 * All mutation of run state goes through one of the {@code locked(...)} helpers at the bottom of
 * this class, which serialize access per application/job (and per step) via curator locks.
 */
class JobController {

    /** Runs older than this are pruned from history when a run finishes. */
    public static final Duration maxHistoryAge = Duration.ofDays(60);

    private static final Logger log = Logger.getLogger(JobController.class.getName());

    // Number of historic runs kept per job; larger in CD systems, see constructor.
    private final int historyLength;
    private final Controller controller;
    private final CuratorDb curator;
    private final BufferedLogStore logs;
    private final TesterCloud cloud;
    private final JobMetrics metric;

    // The active run executor; a no-op until setRunner is called. Held in an AtomicReference
    // so it can be swapped after construction.
    private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });

    public JobController(Controller controller) {
        // CD systems keep a deeper history than public systems.
        this.historyLength = controller.system().isCd() ? 256 : 64;
        this.controller = controller;
        this.curator = controller.curator();
        this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
        this.cloud = controller.serviceRegistry().testerCloud();
        this.metric = new JobMetrics(controller.metric(), controller::system);
    }

    /** Returns the tester cloud used to drive test runs. */
    public TesterCloud cloud() { return cloud; }

    /** Returns the number of historic runs kept per job. */
    public int historyLength() { return historyLength; }

    /** Installs the consumer that advances active runs; replaces the initial no-op. */
    public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }

    /** Rewrite all job data with the newest format. */
    public void updateStorage() {
        for (ApplicationId id : instances())
            for (JobType type : jobs(id)) {
                // Read-and-rewrite under the history lock migrates stored data to the current serialization.
                locked(id, type, runs -> {
                    curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
                });
            }
    }

    /** Returns all entries currently logged for the given run. */
    public Optional<RunLog> details(RunId id) { return details(id, -1); }

    /** Returns the logged entries for the given run, which are after the given id threshold. */
    public Optional<RunLog> details(RunId id, long after) {
        try (Mutex __ = curator.lock(id.application(), id.type())) {
            Run run = runs(id.application(), id.type()).get(id);
            if (run == null)
                return Optional.empty();

            // Active runs read from the buffered (live) store; finished runs from the archive.
            return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after))
                                          : logs.readFinished(id, after);
        }
    }

    /** Stores the given log entries for the given run and step. */
    public void log(RunId id, Step step, List<LogEntry> entries) {
        locked(id, __ -> {
            logs.append(id.application(), id.type(), step, entries, true);
            return __;
        });
    }

    /** Stores the given log messages for the given run and step. */
    public void log(RunId id, Step step, Level level, List<String> messages) {
        log(id, step, messages.stream()
                              .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
                              .collect(toList()));
    }

    /** Stores the given log message for the given run and step. */
    public void log(RunId id, Step step, Level level, String message) {
        log(id, step, level, Collections.singletonList(message));
    }

    /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
    public void updateVespaLog(RunId id) {
        locked(id, run -> {
            if ( ! run.hasStep(copyVespaLogs))
                return run;

            storeVespaLogs(id);

            // Skip if the deployment this run is for is gone, or predates this run.
            ZoneId zone = id.type().zone();
            Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application())
                                                                            .deployments().get(zone));
            if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
                return run;

            List<LogEntry> log;
            Instant deployedAt;
            Instant from;
            if ( ! run.id().type().isProduction()) {
                deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow();
                // Continue from the last seen timestamp, or start 10s before install began.
                from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
                log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
                                                       .getLogs(new DeploymentId(id.application(), zone),
                                                                Map.of("from", Long.toString(from.toEpochMilli()))),
                                             from);
            }
            else log = List.of();

            if (id.type().isTest()) {
                deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow();
                from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
                List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
                                                                            .getLogs(new DeploymentId(id.tester().id(), zone),
                                                                                     Map.of("from", Long.toString(from.toEpochMilli()))),
                                                                  from);
                // Merge real and tester logs; entries from the last ~2s are dropped, presumably to
                // avoid recording entries that may still be out of order — TODO confirm.
                Instant justNow = controller.clock().instant().minusSeconds(2);
                log = Stream.concat(log.stream(), testerLog.stream())
                            .filter(entry -> entry.at().isBefore(justNow))
                            .sorted(comparing(LogEntry::at))
                            .collect(toUnmodifiableList());
            }

            if (log.isEmpty())
                return run;

            logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false);
            // Remember the newest timestamp seen, so the next poll continues from there.
            return run.with(log.get(log.size() - 1).at());
        });
    }

    /**
     * Returns Vespa logs for the given run: from the run data store when log copying has already
     * succeeded, otherwise fetched live from the log server (an empty stream if deployment never started).
     */
    public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) {
        Run run = run(id);
        return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false)
                ? controller.serviceRegistry().runDataStore().getLogs(id, tester)
                : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream());
    }

    /**
     * Returns the start time of the (tester, or real) install step of the run, shifted 10 seconds back,
     * if that step has started. The 10s margin presumably catches log entries from just before install —
     * TODO confirm.
     */
    public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) {
        return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)))
                .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10));
    }

    /** Copies Vespa logs from the log server to the run data store, for the real (non-production) and tester deployments of the run. */
    public void storeVespaLogs(RunId id) {
        Run run = run(id);
        if ( ! id.type().isProduction()) {
            getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> {
                try (logs) {
                    controller.serviceRegistry().runDataStore().putLogs(id, false, logs);
                }
                catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
        }
        if (id.type().isTest()) {
            getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> {
                try (logs) {
                    controller.serviceRegistry().runDataStore().putLogs(id, true, logs);
                }
                catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
        }
    }

    /** Fetches Vespa logs for the run from the log server, bounded by deployment start and run end, if the deployment has started. */
    private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) {
        return deploymentCompletedAt(run, tester).map(at ->
                controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(),
                                                                                     run.id().type().zone()),
                                                                    Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())),
                                                                           "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli()))));
    }

    /** Fetches any new test log entries, and records the id of the last of these, for continuation. */
    public void updateTestLog(RunId id) {
        locked(id, run -> {
            // Only poll the tester while one of these steps is ready to run.
            Optional<Step> step = Stream.of(endStagingSetup, endTests)
                                        .filter(run.readySteps()::contains)
                                        .findAny();
            if (step.isEmpty())
                return run;

            List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry());
            if (entries.isEmpty())
                return run;

            logs.append(id.application(), id.type(), step.get(), entries, false);
            // Remember the highest entry id seen, so the next poll continues from there.
            return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
        });
    }

    /** Fetches the test report from the tester, if any, and stores it for the given run. */
    public void updateTestReport(RunId id) {
        locked(id, run -> {
            Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone()));
            if (report.isEmpty()) {
                return run;
            }
            logs.writeTestReport(id, report.get());
            return run;
        });
    }

    /** Returns the stored test reports for the given run, if any. */
    public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); }

    /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
    public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
        locked(id, run -> run.with(testerCertificate));
    }

    /** Returns a list of all instances of applications which have registered. */
    public List<ApplicationId> instances() {
        return controller.applications().readable().stream()
                         .flatMap(application -> application.instances().values().stream())
                         .map(Instance::id)
                         .collect(toUnmodifiableList());
    }

    /** Returns all job types which have been run for the given application. */
    private List<JobType> jobs(ApplicationId id) {
        return JobType.allIn(controller.zoneRegistry()).stream()
                      .filter(type -> last(id, type).isPresent())
                      .collect(toUnmodifiableList());
    }

    /** Returns an immutable map of all known runs for the given application and job type. */
    public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); }

    /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */
    public List<Instant> jobStarts(JobId id) {
        return runs(id).descendingMap().values().stream()
                       .filter(run -> ! run.isRedeployment())
                       .map(Run::start)
                       .collect(toUnmodifiableList());
    }

    /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */
    public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) {
        return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream()
                .findFirst()
                .orElseGet(deployment::at);
    }

    /** Returns an immutable map of all known runs for the given application and job type. */
    public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) {
        ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number));
        Optional<Run> last = last(id, type);
        // The last run may be newer than its historic copy, so it is added last, and the historic
        // copy with the same id skipped, to avoid duplicate keys in the builder.
        curator.readHistoricRuns(id, type).forEach((runId, run) -> {
            if (last.isEmpty() || ! runId.equals(last.get().id()))
                runs.put(runId, run);
        });
        last.ifPresent(run -> runs.put(run.id(), run));
        return runs.build();
    }

    /** Returns the run with the given id, or throws if no such run exists. */
    public Run run(RunId id) {
        return runs(id.application(), id.type()).values().stream()
                                                .filter(run -> run.id().equals(id))
                                                .findAny()
                                                .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists"));
    }

    /** Returns the last run of the given type, for the given application, if one has been run. */
    public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); }

    /** Returns the last run of the given type, for the given application, if one has been run. */
    public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); }

    /** Returns the last completed of the given job. */
    public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); }

    /** Returns the first failing of the given job. */
    public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); }

    /** Returns the last success of the given job. */
    public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); }

    /** Returns the run with the given id, provided it is still active. */
    public Optional<Run> active(RunId id) {
        return last(id.application(), id.type())
                .filter(run -> ! run.hasEnded())
                .filter(run -> run.id().equals(id));
    }

    /** Returns a list of all active runs. */
    public List<Run> active() {
        return controller.applications().idList().stream()
                         .flatMap(id -> active(id).stream())
                         .toList();
    }

    /** Returns a list of all active runs for the given application. */
    public List<Run> active(TenantAndApplicationId id) {
        return controller.applications().requireApplication(id).instances().keySet().stream()
                         .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream()
                                                 .map(type -> last(id.instance(name), type))
                                                 .flatMap(Optional::stream)
                                                 .filter(run -> ! run.hasEnded()))
                         .toList();
    }

    /** Returns a list of all active runs for the given instance. */
    public List<Run> active(ApplicationId id) {
        return JobType.allIn(controller.zoneRegistry()).stream()
                      .map(type -> last(id, type))
                      .flatMap(Optional::stream)
                      .filter(run -> !run.hasEnded())
                      .toList();
    }

    /** Returns the job status of the given job, possibly empty. */
    public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); }

    /** Returns the deployment status of the given application. */
    public DeploymentStatus deploymentStatus(Application application) {
        VersionStatus versionStatus = controller.readVersionStatus();
        return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus));
    }

    private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) {
        return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion,
                                    instance -> controller.applications().versionCompatibility(application.id().instance(instance)),
                                    controller.clock().instant());
    }

    /** Adds deployment status to each of the given applications. */
    public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) {
        // System version is resolved once here, rather than per application.
        Version systemVersion = controller.systemVersion(versionStatus);
        return DeploymentStatusList.from(applications.asList().stream()
                                                     .map(application -> deploymentStatus(application, versionStatus, systemVersion))
                                                     .toList());
    }

    /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */
    public DeploymentStatusList deploymentStatuses(ApplicationList applications) {
        VersionStatus versionStatus = controller.readVersionStatus();
        return deploymentStatuses(applications, versionStatus);
    }

    /** Changes the status of the given step, for the given run, provided it is still active. */
    public void update(RunId id, RunStatus status, LockedStep step) {
        locked(id, run -> run.with(status, step));
    }

    /**
     * Changes the status of the given run to inactive, and stores it as a historic run.
     * Throws TimeoutException if some step in this job is still being run.
     */
    public void finish(RunId id) throws TimeoutException {
        Deque<Mutex> locks = new ArrayDeque<>();
        try {
            // Ensure no step is still running before finishing the run: take the report lock,
            // then the lock of each of its prerequisite steps; all are released in the finally block.
            Run unlockedRun = run(id);
            locks.push(curator.lock(id.application(), id.type(), report));
            for (Step step : report.allPrerequisites(unlockedRun.steps().keySet()))
                locks.push(curator.lock(id.application(), id.type(), step));

            locked(id, run -> {
                if (run.status() == reset) {
                    // NOTE(review): the log message literal below was truncated in the source
                    // ("List.of(" with no closing quote) — restore the original text from VCS.
                    for (Step step : run.steps().keySet())
                        log(id, step, INFO, List.of("
                    return run.reset();
                }
                if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals)))
                    return run;

                Run finishedRun = run.finished(controller.clock().instant());
                locked(id.application(), id.type(), runs -> {
                    runs.put(run.id(), finishedRun);
                    long last = id.number();
                    long successes = runs.values().stream().filter(Run::hasSucceeded).count();
                    var oldEntries = runs.entrySet().iterator();
                    // Prune runs beyond the history length, and runs older than maxHistoryAge —
                    // but keep the only successful run, if it is not yet too old.
                    for (var old = oldEntries.next();
                            old.getKey().number() <= last - historyLength
                         || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
                         old = oldEntries.next()) {
                        if (   successes == 1
                            && old.getValue().hasSucceeded()
                            && ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) {
                            oldEntries.next();
                            continue;
                        }
                        logs.delete(old.getKey());
                        oldEntries.remove();
                    }
                });
                logs.flush(id);
                metric.jobFinished(run.id().job(), finishedRun.status());
                pruneRevisions(unlockedRun);
                return finishedRun;
            });
        }
        finally {
            for (Mutex lock : locks) {
                try {
                    lock.close();
                }
                catch (Throwable t) {
                    log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " +
                                     "have been released in ZooKeeper, and if not this controller " +
                                     "must be restarted to release the lock", t);
                }
            }
        }
    }

    /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
    public void abort(RunId id, String reason) {
        locked(id, run -> {
            run.stepStatuses().entrySet().stream()
               .filter(entry -> entry.getValue() == unfinished)
               .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason));
            return run.aborted();
        });
    }

    /** Accepts and stores a new application package and test jar pair under a generated application version key. */
    public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) {
        ApplicationController applications = controller.applications();
        AtomicReference<ApplicationVersion> version = new AtomicReference<>();
        applications.lockApplicationOrThrow(id, application -> {
            Optional<ApplicationVersion> previousVersion = application.get().revisions().last();
            Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong()))
                                                                          .map(ApplicationPackage::new);
            long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L);
            version.set(submission.toApplicationVersion(1 + previousBuild));

            // Diff against the previous package if present, otherwise against the empty package.
            byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage()))
                                         .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage()));
            applications.applicationStore().put(id.tenant(),
                                                id.application(),
                                                version.get().id(),
                                                submission.applicationPackage().zippedContent(),
                                                submission.testPackage(),
                                                diff);
            applications.applicationStore().putMeta(id.tenant(),
                                                    id.application(),
                                                    controller.clock().instant(),
                                                    submission.applicationPackage().metaDataZip());

            // -1 means "no project id"; see the projectId parameter.
            application = application.withProjectId(projectId == -1 ? OptionalLong.empty() : OptionalLong.of(projectId));
            application = application.withRevisions(revisions -> revisions.with(version.get()));
            application = withPrunedPackages(application, version.get().id());

            validate(id, submission);

            applications.storeWithUpdatedConfig(application, submission.applicationPackage());
            if (application.get().projectId().isPresent())
                applications.deploymentTrigger().triggerNewRevision(id);
        });
        return version.get();
    }

    /** Runs all submission validations, each of which sets or clears its own notification. */
    private void validate(TenantAndApplicationId id, Submission submission) {
        validateTests(id, submission);
        validateParentVersion(id, submission);
        validateMajorVersion(id, submission);
    }

    /** Sets a warning notification for any problems found in the test package, or clears it if there are none. */
    private void validateTests(TenantAndApplicationId id, Submission submission) {
        TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage());
        if (testSummary.problems().isEmpty())
            controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage);
        else
            controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage,
                                                         Notification.Level.warning,
                                                         testSummary.problems());
    }

    /** Warns when the package was compiled against a Vespa major version older than the current system major. */
    private void validateParentVersion(TenantAndApplicationId id, Submission submission) {
        submission.applicationPackage().parentVersion().ifPresent(parent -> {
            if (parent.getMajor() < controller.readSystemVersion().getMajor())
                controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning,
                                                             "Parent version used to compile the application is on a " +
                                                             "lower major version than the current Vespa Cloud version");
            else
                controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission);
        });
    }

    /**
     * Prunes stored application packages older than the oldest revision still deployed or rolling out,
     * and marks the pruned revisions as having no package.
     */
    private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){
        TenantAndApplicationId id = application.get().id();
        Application wrapped = application.get();
        // Keep from the oldest deployed revision; fall back to the oldest revision currently
        // rolling out in any instance, and finally to the newly submitted revision.
        RevisionId oldestDeployed = application.get().oldestDeployedRevision()
                                               .or(() -> wrapped.instances().values().stream()
                                                                .flatMap(instance -> instance.change().revision().stream())
                                                                .min(naturalOrder()))
                                               .orElse(latest);
        controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed);

        for (ApplicationVersion version : application.get().revisions().withPackage())
            if (version.id().compareTo(oldestDeployed) < 0)
                application = application.withRevisions(revisions -> revisions.with(version.withoutPackage()));
        return application;
    }

    /** Forget revisions no longer present in any relevant job history. */
    private void pruneRevisions(Run run) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application());
        boolean isProduction = run.versions().targetRevision().isProduction();
        // Production revisions are shared across all jobs; development revisions are per job.
        (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream()
                      : Stream.of(jobStatus(run.id().job())))
                .flatMap(jobs -> jobs.runs().values().stream())
                .map(r -> r.versions().targetRevision())
                .filter(id -> id.isProduction() == isProduction)
                .min(naturalOrder())
                .ifPresent(oldestRevision -> {
                    controller.applications().lockApplicationOrThrow(applicationId, application -> {
                        if (isProduction) {
                            controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number());
                            controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision)));
                        }
                        else {
                            controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number());
                            controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job())));
                        }
                    });
                });
    }

    /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
    public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) {
        start(id, type, versions, isRedeployment, JobProfile.of(type), reason);
    }

    /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
    public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) {
        ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision());
        if (revision.compileVersion()
                    .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version))
                    .orElse(false))
            throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" +
                                               versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")");

        locked(id, type, __ -> {
            Optional<Run> last = last(id, type);
            if (last.flatMap(run -> active(run.id())).isPresent())
                throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!");

            RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
            curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason));
            metric.jobStarted(newId.job());
        });
    }

    /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
    public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
        deploy(id, type, platform, applicationPackage, false);
    }

    /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/
    public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) {
        if ( ! controller.zoneRegistry().hasZone(type.zone()))
            throw new IllegalArgumentException(type.zone() + " is not present in this system");

        // Create the instance implicitly on first deployment to it.
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            if ( ! application.get().instances().containsKey(id.instance()))
                application = controller.applications().withNewInstance(application, id);
            controller.applications().store(application);
        });

        DeploymentId deploymentId = new DeploymentId(id, type.zone());
        Optional<Run> lastRun = last(id, type);
        lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2)));

        long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L);
        RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type));
        ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion());

        byte[] diff = getDiff(applicationPackage, deploymentId, lastRun);

        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff);
            Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance())));
            controller.applications().store(application.withRevisions(revisions -> revisions.with(version)));
            start(id,
                  type,
                  new Versions(targetPlatform,
                               version.id(),
                               lastRun.map(run -> run.versions().targetPlatform()),
                               lastRun.map(run -> run.versions().targetRevision())),
                  false,
                  dryRun ? JobProfile.developmentDryRun : JobProfile.development,
                  Optional.empty());
        });

        // Kick off the newly created run immediately, under the job lock.
        locked(id, type, __ -> {
            runner.get().accept(last(id, type).get());
        });
    }

    /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */
    private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) {
        return lastRun.map(run -> run.versions().targetRevision())
                      .map(prevVersion -> {
                          ApplicationPackage previous;
                          try {
                              previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion));
                          }
                          catch (IllegalArgumentException e) {
                              // The stored previous package could not be read as a package; diff against empty instead.
                              return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage);
                          }
                          return ApplicationPackageDiff.diff(previous, applicationPackage);
                      })
                      .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage));
    }

    /**
     * Picks the newest deployable platform which is both compatible with the package's compile version
     * and on the major version pinned in deployment.xml, if any; throws IllegalArgumentException when
     * no such platform exists.
     */
    private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) {
        // Candidates: all deployable system versions, plus whatever is already deployed in this zone.
        List<Version> versions = controller.readVersionStatus().deployableVersions().stream()
                                           .map(VespaVersion::versionNumber)
                                           .collect(toList());
        instance.map(Instance::deployments)
                .map(deployments -> deployments.get(id.zoneId()))
                .map(Deployment::version)
                .ifPresent(versions::add);

        if (versions.isEmpty())
            throw new IllegalStateException("no deployable platform version found in the system");

        VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId());
        List<Version> compatibleVersions = new ArrayList<>();
        for (Version target : reversed(versions))
            if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get()))
                compatibleVersions.add(target);
        if (compatibleVersions.isEmpty())
            throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get());

        Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion();
        List<Version> versionOnRightMajor = new ArrayList<>();
        for (Version target : reversed(versions))
            if (major.isEmpty() || major.get() == target.getMajor())
                versionOnRightMajor.add(target);
        if (versionOnRightMajor.isEmpty())
            throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml");

        for (Version target : compatibleVersions)
            if (versionOnRightMajor.contains(target))
                return target;
        throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " +
                                           "are compatible with compile version " + applicationPackage.compileVersion().get());
    }

    /** Aborts a run and waits for it complete. */
    private void abortAndWait(RunId id, Duration timeout) {
        abort(id, "replaced by new deployment");
        runner.get().accept(last(id.application(), id.type()).get());

        // Poll every 100ms until the run has ended, or the timeout is exceeded.
        Instant doom = controller.clock().instant().plus(timeout);
        Duration sleep = Duration.ofMillis(100);
        while ( ! last(id.application(), id.type()).get().hasEnded()) {
            if (controller.clock().instant().plus(sleep).isAfter(doom))
                throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish");

            try {
                Thread.sleep(sleep.toMillis());
            }
            catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        }
    }

    /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
    public void collectGarbage() {
        Set<ApplicationId> applicationsToBuild = new HashSet<>(instances());
        curator.applicationsWithJobs().stream()
               .filter(id -> ! applicationsToBuild.contains(id))
               .forEach(id -> {
                   try {
                       TesterId tester = TesterId.of(id);
                       // Here, deactivateTester is a Step constant (static import), naming the step lock to hold.
                       for (JobType type : jobs(id))
                           locked(id, type, deactivateTester, __ -> {
                               try (Mutex ___ = curator.lock(id, type)) {
                                   try {
                                       deactivateTester(tester, type);
                                   }
                                   catch (Exception e) {
                                       // Ignored: deactivation is best-effort — the tester may already be
                                       // gone — and run data must be deleted regardless.
                                   }
                                   curator.deleteRunData(id, type);
                               }
                           });
                       logs.delete(id);
                       curator.deleteRunData(id);
                   }
                   catch (Exception e) {
                       log.log(WARNING, "failed cleaning up after deleted application", e);
                   }
               });
    }

    /** Deactivates the tester deployment of the given tester, in the zone of the given job type. */
    public void deactivateTester(TesterId id, JobType type) {
        controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone()));
    }

    /** Locks all runs and modifies the list of historic runs for the given application and job type. */
    private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
        try (Mutex __ = curator.lock(id, type)) {
            SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type));
            modifications.accept(runs);
            curator.writeHistoricRuns(id, type, runs.values());
        }
    }

    /** Locks and modifies the run with the given id, provided it is still active. */
    public void locked(RunId id, UnaryOperator<Run> modifications) {
        try (Mutex __ = curator.lock(id.application(), id.type())) {
            active(id).ifPresent(run -> {
                Run modified = modifications.apply(run);
                if (modified != null)
                    curator.writeLastRun(modified);
            });
        }
    }

    /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
    public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
        try (Mutex lock = curator.lock(id, type, step)) {
            // Briefly acquiring and releasing each prerequisite's lock verifies none of them is
            // currently being run; the empty try body is intentional.
            for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet()))
                try (Mutex __ = curator.lock(id, type, prerequisite)) { ; }

            action.accept(new LockedStep(lock, step));
        }
    }

}
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> 
wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! 
controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
👍
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().validatePackage(applicationPackage, application.get()); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); }
controller.applications().validatePackage(applicationPackage, application.get());
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().validatePackage(applicationPackage, application.get()); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); }
/**
 * Controls deployment and test jobs for applications: tracks active and historic runs, stores and
 * streams their logs (Vespa logs, tester logs, test reports), manages submitted application
 * revisions and their pruning, and serializes all run mutations through per-job curator locks.
 *
 * NOTE(review): this chunk appears to have been extracted with lines collapsed and some string
 * literals truncated mid-line (e.g. a log-message literal inside finish() and a URL inside
 * validateMajorVersion()). Several physical line boundaries fall inside Javadoc comments or
 * string literals, so the code below is preserved byte-for-byte with no internal insertions —
 * restore the original literals from version control before compiling.
 */
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, 
Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? 
deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. 
*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version 
" + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream()); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10)); } public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! 
id.type().isProduction()) { getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } }); } if (id.type().isTest()) { getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> { try (logs) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch(IOException e){ throw new UncheckedIOException(e); } }); } } private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { return deploymentCompletedAt(run, tester).map(at -> controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli())))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. */ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. 
*/ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! 
run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. */ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! 
old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); validate(id, submission); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private void validate(TenantAndApplicationId id, Submission submission) { validateTests(id, submission); validateParentVersion(id, submission); validateMajorVersion(id, submission); } private void validateTests(TenantAndApplicationId id, Submission submission) { TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); } private void validateParentVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); } private void validateMajorVersion(TenantAndApplicationId id, Submission submission) { submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> { if (explicitMajor < 8) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.applicationPackage, 
Notification.Level.warning, "Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" + "https: else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage); }); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? 
deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. 
*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version 
" + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
Mega-nitpick: missing spaces after comma in this and above ctors
public Pong(ErrorMessage error) { this(null, null,false, error); }
this(null, null,false, error);
public Pong(ErrorMessage error) { this(null, null, false, error); }
class Pong { private final ElapsedTime elapsed = new ElapsedTime(); private final Long activeDocuments; private final Long targetActiveDocuments; private final boolean isBlockingWrites; private final ErrorMessage error; public Pong() { this(null, null,false, null); } public Pong(long activeDocuments, long targetActiveDocuments) { this(activeDocuments, targetActiveDocuments, false, null); } public Pong(long activeDocuments, long targetActiveDocuments, boolean isBlockingWrites) { this(activeDocuments, targetActiveDocuments, isBlockingWrites, null); } private Pong(Long activeDocuments, Long targetActiveDocuments, boolean isBlockingWrites, ErrorMessage error) { this.activeDocuments = activeDocuments; this.targetActiveDocuments = targetActiveDocuments; this.isBlockingWrites = isBlockingWrites; this.error = error; } public Optional<ErrorMessage> error() { return Optional.ofNullable(error); } /** Returns the number of active documents in the backend responding in this Pong, if available */ public Optional<Long> activeDocuments() { return Optional.ofNullable(activeDocuments); } /** Returns the number of target active documents in the backend responding in this Pong, if available */ public Optional<Long> targetActiveDocuments() { return Optional.ofNullable(targetActiveDocuments); } /** Returns true if the pinged node is currently blocking write operations due to being full */ public boolean isBlockingWrites() { return isBlockingWrites; } /** Returns whether there is an error or not */ public boolean badResponse() { return error != null; } public ElapsedTime getElapsedTime() { return elapsed; } /** Returns a string which included the ping info (if any) and any errors added to this */ @Override public String toString() { StringBuilder m = new StringBuilder("Ping result"); activeDocuments().ifPresent(docCount -> m.append(" active docs: ").append(docCount)); if (isBlockingWrites) m.append(" blocking writes: true"); error().ifPresent(e -> m.append(" error: ").append(error)); 
return m.toString(); } }
class Pong { private final ElapsedTime elapsed = new ElapsedTime(); private final Long activeDocuments; private final Long targetActiveDocuments; private final boolean isBlockingWrites; private final ErrorMessage error; public Pong() { this(null, null, false, null); } public Pong(long activeDocuments, long targetActiveDocuments) { this(activeDocuments, targetActiveDocuments, false, null); } public Pong(long activeDocuments, long targetActiveDocuments, boolean isBlockingWrites) { this(activeDocuments, targetActiveDocuments, isBlockingWrites, null); } private Pong(Long activeDocuments, Long targetActiveDocuments, boolean isBlockingWrites, ErrorMessage error) { this.activeDocuments = activeDocuments; this.targetActiveDocuments = targetActiveDocuments; this.isBlockingWrites = isBlockingWrites; this.error = error; } public Optional<ErrorMessage> error() { return Optional.ofNullable(error); } /** Returns the number of active documents in the backend responding in this Pong, if available */ public Optional<Long> activeDocuments() { return Optional.ofNullable(activeDocuments); } /** Returns the number of target active documents in the backend responding in this Pong, if available */ public Optional<Long> targetActiveDocuments() { return Optional.ofNullable(targetActiveDocuments); } /** Returns true if the pinged node is currently blocking write operations due to being full */ public boolean isBlockingWrites() { return isBlockingWrites; } /** Returns whether there is an error or not */ public boolean badResponse() { return error != null; } public ElapsedTime getElapsedTime() { return elapsed; } /** Returns a string which included the ping info (if any) and any errors added to this */ @Override public String toString() { StringBuilder m = new StringBuilder("Ping result"); activeDocuments().ifPresent(docCount -> m.append(" active docs: ").append(docCount)); targetActiveDocuments().ifPresent(docCount -> m.append(" target active docs: ").append(docCount)); if (isBlockingWrites) 
m.append(" blocking writes: true"); error().ifPresent(e -> m.append(" error: ").append(error)); return m.toString(); } }
Should we add output for target active documents as well?
public String toString() { StringBuilder m = new StringBuilder("Ping result"); activeDocuments().ifPresent(docCount -> m.append(" active docs: ").append(docCount)); if (isBlockingWrites) m.append(" blocking writes: true"); error().ifPresent(e -> m.append(" error: ").append(error)); return m.toString(); }
activeDocuments().ifPresent(docCount -> m.append(" active docs: ").append(docCount));
public String toString() { StringBuilder m = new StringBuilder("Ping result"); activeDocuments().ifPresent(docCount -> m.append(" active docs: ").append(docCount)); targetActiveDocuments().ifPresent(docCount -> m.append(" target active docs: ").append(docCount)); if (isBlockingWrites) m.append(" blocking writes: true"); error().ifPresent(e -> m.append(" error: ").append(error)); return m.toString(); }
class Pong { private final ElapsedTime elapsed = new ElapsedTime(); private final Long activeDocuments; private final Long targetActiveDocuments; private final boolean isBlockingWrites; private final ErrorMessage error; public Pong() { this(null, null,false, null); } public Pong(ErrorMessage error) { this(null, null,false, error); } public Pong(long activeDocuments, long targetActiveDocuments) { this(activeDocuments, targetActiveDocuments, false, null); } public Pong(long activeDocuments, long targetActiveDocuments, boolean isBlockingWrites) { this(activeDocuments, targetActiveDocuments, isBlockingWrites, null); } private Pong(Long activeDocuments, Long targetActiveDocuments, boolean isBlockingWrites, ErrorMessage error) { this.activeDocuments = activeDocuments; this.targetActiveDocuments = targetActiveDocuments; this.isBlockingWrites = isBlockingWrites; this.error = error; } public Optional<ErrorMessage> error() { return Optional.ofNullable(error); } /** Returns the number of active documents in the backend responding in this Pong, if available */ public Optional<Long> activeDocuments() { return Optional.ofNullable(activeDocuments); } /** Returns the number of target active documents in the backend responding in this Pong, if available */ public Optional<Long> targetActiveDocuments() { return Optional.ofNullable(targetActiveDocuments); } /** Returns true if the pinged node is currently blocking write operations due to being full */ public boolean isBlockingWrites() { return isBlockingWrites; } /** Returns whether there is an error or not */ public boolean badResponse() { return error != null; } public ElapsedTime getElapsedTime() { return elapsed; } /** Returns a string which included the ping info (if any) and any errors added to this */ @Override }
class Pong { private final ElapsedTime elapsed = new ElapsedTime(); private final Long activeDocuments; private final Long targetActiveDocuments; private final boolean isBlockingWrites; private final ErrorMessage error; public Pong() { this(null, null, false, null); } public Pong(ErrorMessage error) { this(null, null, false, error); } public Pong(long activeDocuments, long targetActiveDocuments) { this(activeDocuments, targetActiveDocuments, false, null); } public Pong(long activeDocuments, long targetActiveDocuments, boolean isBlockingWrites) { this(activeDocuments, targetActiveDocuments, isBlockingWrites, null); } private Pong(Long activeDocuments, Long targetActiveDocuments, boolean isBlockingWrites, ErrorMessage error) { this.activeDocuments = activeDocuments; this.targetActiveDocuments = targetActiveDocuments; this.isBlockingWrites = isBlockingWrites; this.error = error; } public Optional<ErrorMessage> error() { return Optional.ofNullable(error); } /** Returns the number of active documents in the backend responding in this Pong, if available */ public Optional<Long> activeDocuments() { return Optional.ofNullable(activeDocuments); } /** Returns the number of target active documents in the backend responding in this Pong, if available */ public Optional<Long> targetActiveDocuments() { return Optional.ofNullable(targetActiveDocuments); } /** Returns true if the pinged node is currently blocking write operations due to being full */ public boolean isBlockingWrites() { return isBlockingWrites; } /** Returns whether there is an error or not */ public boolean badResponse() { return error != null; } public ElapsedTime getElapsedTime() { return elapsed; } /** Returns a string which included the ping info (if any) and any errors added to this */ @Override }
Yes, done
public String toString() { StringBuilder m = new StringBuilder("Ping result"); activeDocuments().ifPresent(docCount -> m.append(" active docs: ").append(docCount)); if (isBlockingWrites) m.append(" blocking writes: true"); error().ifPresent(e -> m.append(" error: ").append(error)); return m.toString(); }
activeDocuments().ifPresent(docCount -> m.append(" active docs: ").append(docCount));
public String toString() { StringBuilder m = new StringBuilder("Ping result"); activeDocuments().ifPresent(docCount -> m.append(" active docs: ").append(docCount)); targetActiveDocuments().ifPresent(docCount -> m.append(" target active docs: ").append(docCount)); if (isBlockingWrites) m.append(" blocking writes: true"); error().ifPresent(e -> m.append(" error: ").append(error)); return m.toString(); }
class Pong { private final ElapsedTime elapsed = new ElapsedTime(); private final Long activeDocuments; private final Long targetActiveDocuments; private final boolean isBlockingWrites; private final ErrorMessage error; public Pong() { this(null, null,false, null); } public Pong(ErrorMessage error) { this(null, null,false, error); } public Pong(long activeDocuments, long targetActiveDocuments) { this(activeDocuments, targetActiveDocuments, false, null); } public Pong(long activeDocuments, long targetActiveDocuments, boolean isBlockingWrites) { this(activeDocuments, targetActiveDocuments, isBlockingWrites, null); } private Pong(Long activeDocuments, Long targetActiveDocuments, boolean isBlockingWrites, ErrorMessage error) { this.activeDocuments = activeDocuments; this.targetActiveDocuments = targetActiveDocuments; this.isBlockingWrites = isBlockingWrites; this.error = error; } public Optional<ErrorMessage> error() { return Optional.ofNullable(error); } /** Returns the number of active documents in the backend responding in this Pong, if available */ public Optional<Long> activeDocuments() { return Optional.ofNullable(activeDocuments); } /** Returns the number of target active documents in the backend responding in this Pong, if available */ public Optional<Long> targetActiveDocuments() { return Optional.ofNullable(targetActiveDocuments); } /** Returns true if the pinged node is currently blocking write operations due to being full */ public boolean isBlockingWrites() { return isBlockingWrites; } /** Returns whether there is an error or not */ public boolean badResponse() { return error != null; } public ElapsedTime getElapsedTime() { return elapsed; } /** Returns a string which included the ping info (if any) and any errors added to this */ @Override }
class Pong { private final ElapsedTime elapsed = new ElapsedTime(); private final Long activeDocuments; private final Long targetActiveDocuments; private final boolean isBlockingWrites; private final ErrorMessage error; public Pong() { this(null, null, false, null); } public Pong(ErrorMessage error) { this(null, null, false, error); } public Pong(long activeDocuments, long targetActiveDocuments) { this(activeDocuments, targetActiveDocuments, false, null); } public Pong(long activeDocuments, long targetActiveDocuments, boolean isBlockingWrites) { this(activeDocuments, targetActiveDocuments, isBlockingWrites, null); } private Pong(Long activeDocuments, Long targetActiveDocuments, boolean isBlockingWrites, ErrorMessage error) { this.activeDocuments = activeDocuments; this.targetActiveDocuments = targetActiveDocuments; this.isBlockingWrites = isBlockingWrites; this.error = error; } public Optional<ErrorMessage> error() { return Optional.ofNullable(error); } /** Returns the number of active documents in the backend responding in this Pong, if available */ public Optional<Long> activeDocuments() { return Optional.ofNullable(activeDocuments); } /** Returns the number of target active documents in the backend responding in this Pong, if available */ public Optional<Long> targetActiveDocuments() { return Optional.ofNullable(targetActiveDocuments); } /** Returns true if the pinged node is currently blocking write operations due to being full */ public boolean isBlockingWrites() { return isBlockingWrites; } /** Returns whether there is an error or not */ public boolean badResponse() { return error != null; } public ElapsedTime getElapsedTime() { return elapsed; } /** Returns a string which included the ping info (if any) and any errors added to this */ @Override }
Fixed
public Pong(ErrorMessage error) { this(null, null,false, error); }
this(null, null,false, error);
public Pong(ErrorMessage error) { this(null, null, false, error); }
class Pong { private final ElapsedTime elapsed = new ElapsedTime(); private final Long activeDocuments; private final Long targetActiveDocuments; private final boolean isBlockingWrites; private final ErrorMessage error; public Pong() { this(null, null,false, null); } public Pong(long activeDocuments, long targetActiveDocuments) { this(activeDocuments, targetActiveDocuments, false, null); } public Pong(long activeDocuments, long targetActiveDocuments, boolean isBlockingWrites) { this(activeDocuments, targetActiveDocuments, isBlockingWrites, null); } private Pong(Long activeDocuments, Long targetActiveDocuments, boolean isBlockingWrites, ErrorMessage error) { this.activeDocuments = activeDocuments; this.targetActiveDocuments = targetActiveDocuments; this.isBlockingWrites = isBlockingWrites; this.error = error; } public Optional<ErrorMessage> error() { return Optional.ofNullable(error); } /** Returns the number of active documents in the backend responding in this Pong, if available */ public Optional<Long> activeDocuments() { return Optional.ofNullable(activeDocuments); } /** Returns the number of target active documents in the backend responding in this Pong, if available */ public Optional<Long> targetActiveDocuments() { return Optional.ofNullable(targetActiveDocuments); } /** Returns true if the pinged node is currently blocking write operations due to being full */ public boolean isBlockingWrites() { return isBlockingWrites; } /** Returns whether there is an error or not */ public boolean badResponse() { return error != null; } public ElapsedTime getElapsedTime() { return elapsed; } /** Returns a string which included the ping info (if any) and any errors added to this */ @Override public String toString() { StringBuilder m = new StringBuilder("Ping result"); activeDocuments().ifPresent(docCount -> m.append(" active docs: ").append(docCount)); if (isBlockingWrites) m.append(" blocking writes: true"); error().ifPresent(e -> m.append(" error: ").append(error)); 
return m.toString(); } }
class Pong { private final ElapsedTime elapsed = new ElapsedTime(); private final Long activeDocuments; private final Long targetActiveDocuments; private final boolean isBlockingWrites; private final ErrorMessage error; public Pong() { this(null, null, false, null); } public Pong(long activeDocuments, long targetActiveDocuments) { this(activeDocuments, targetActiveDocuments, false, null); } public Pong(long activeDocuments, long targetActiveDocuments, boolean isBlockingWrites) { this(activeDocuments, targetActiveDocuments, isBlockingWrites, null); } private Pong(Long activeDocuments, Long targetActiveDocuments, boolean isBlockingWrites, ErrorMessage error) { this.activeDocuments = activeDocuments; this.targetActiveDocuments = targetActiveDocuments; this.isBlockingWrites = isBlockingWrites; this.error = error; } public Optional<ErrorMessage> error() { return Optional.ofNullable(error); } /** Returns the number of active documents in the backend responding in this Pong, if available */ public Optional<Long> activeDocuments() { return Optional.ofNullable(activeDocuments); } /** Returns the number of target active documents in the backend responding in this Pong, if available */ public Optional<Long> targetActiveDocuments() { return Optional.ofNullable(targetActiveDocuments); } /** Returns true if the pinged node is currently blocking write operations due to being full */ public boolean isBlockingWrites() { return isBlockingWrites; } /** Returns whether there is an error or not */ public boolean badResponse() { return error != null; } public ElapsedTime getElapsedTime() { return elapsed; } /** Returns a string which included the ping info (if any) and any errors added to this */ @Override public String toString() { StringBuilder m = new StringBuilder("Ping result"); activeDocuments().ifPresent(docCount -> m.append(" active docs: ").append(docCount)); targetActiveDocuments().ifPresent(docCount -> m.append(" target active docs: ").append(docCount)); if (isBlockingWrites) 
m.append(" blocking writes: true"); error().ifPresent(e -> m.append(" error: ").append(error)); return m.toString(); } }
This does not come without a performance cost. At least one of the Token implementations uses lazy lookup of the token when calling getOrig(), and getOffset will cause a non cached jni call that we want to avoid.
private static void addAnnotationSpan(String input, SpanList parent, Token token, StemMode mode, TermOccurrences termOccurrences) { if ( ! token.isSpecialToken()) { if (token.getNumComponents() > 0) { for (int i = 0; i < token.getNumComponents(); ++i) { addAnnotationSpan(input, parent, token.getComponent(i), mode, termOccurrences); } return; } if ( ! token.isIndexable()) return; } if (token.getOffset() >= input.length()) { throw new IllegalArgumentException(token + " has offset " + token.getOffset() + ", which is outside the " + "bounds of the input string '" + input + "'"); } if (token.getOffset() + token.getOrig().length() > input.length()) { throw new IllegalArgumentException(token + " has offset " + token.getOffset() + ", which makes it overflow " + "the bounds of the input string; " + input); } if (mode == StemMode.ALL) { Span where = parent.span((int)token.getOffset(), token.getOrig().length()); String lowercasedOrig = toLowerCase(token.getOrig()); addAnnotation(where, token.getOrig(), token.getOrig(), termOccurrences); String lowercasedTerm = lowercasedOrig; String term = token.getTokenString(); if (term != null) { lowercasedTerm = toLowerCase(term); } if (! lowercasedOrig.equals(lowercasedTerm)) { addAnnotation(where, term, token.getOrig(), termOccurrences); } for (int i = 0; i < token.getNumStems(); i++) { String stem = token.getStem(i); String lowercasedStem = toLowerCase(stem); if (! (lowercasedOrig.equals(lowercasedStem) || lowercasedTerm.equals(lowercasedStem))) { addAnnotation(where, stem, token.getOrig(), termOccurrences); } } } else { String term = token.getTokenString(); if (term == null || term.trim().isEmpty()) return; if (termOccurrences.termCountBelowLimit(term)) { parent.span((int)token.getOffset(), token.getOrig().length()).annotate(lowerCaseTermAnnotation(term, token.getOrig())); } } }
if (token.getOffset() >= input.length()) {
private static void addAnnotationSpan(String input, SpanList parent, Token token, StemMode mode, TermOccurrences termOccurrences) { if ( ! token.isSpecialToken()) { if (token.getNumComponents() > 0) { for (int i = 0; i < token.getNumComponents(); ++i) { addAnnotationSpan(input, parent, token.getComponent(i), mode, termOccurrences); } return; } if ( ! token.isIndexable()) return; } if (token.getOffset() >= input.length()) { throw new IllegalArgumentException(token + " has offset " + token.getOffset() + ", which is outside the " + "bounds of the input string '" + input + "'"); } if (token.getOffset() + token.getOrig().length() > input.length()) { throw new IllegalArgumentException(token + " has offset " + token.getOffset() + ", which makes it overflow " + "the bounds of the input string; " + input); } if (mode == StemMode.ALL) { Span where = parent.span((int)token.getOffset(), token.getOrig().length()); String lowercasedOrig = toLowerCase(token.getOrig()); addAnnotation(where, token.getOrig(), token.getOrig(), termOccurrences); String lowercasedTerm = lowercasedOrig; String term = token.getTokenString(); if (term != null) { lowercasedTerm = toLowerCase(term); } if (! lowercasedOrig.equals(lowercasedTerm)) { addAnnotation(where, term, token.getOrig(), termOccurrences); } for (int i = 0; i < token.getNumStems(); i++) { String stem = token.getStem(i); String lowercasedStem = toLowerCase(stem); if (! (lowercasedOrig.equals(lowercasedStem) || lowercasedTerm.equals(lowercasedStem))) { addAnnotation(where, stem, token.getOrig(), termOccurrences); } } } else { String term = token.getTokenString(); if (term == null || term.trim().isEmpty()) return; if (termOccurrences.termCountBelowLimit(term)) { parent.span((int)token.getOffset(), token.getOrig().length()).annotate(lowerCaseTermAnnotation(term, token.getOrig())); } } }
class TermOccurrences { final Map<String, Integer> termOccurrences = new HashMap<>(); final int maxOccurrences; public TermOccurrences(int maxOccurences) { this.maxOccurrences = maxOccurences; } boolean termCountBelowLimit(String term) { String lowerCasedTerm = toLowerCase(term); int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0); if (occurrences >= maxOccurrences) return false; termOccurrences.put(lowerCasedTerm, occurrences + 1); return true; } }
class TermOccurrences { final Map<String, Integer> termOccurrences = new HashMap<>(); final int maxOccurrences; public TermOccurrences(int maxOccurences) { this.maxOccurrences = maxOccurences; } boolean termCountBelowLimit(String term) { String lowerCasedTerm = toLowerCase(term); int occurrences = termOccurrences.getOrDefault(lowerCasedTerm, 0); if (occurrences >= maxOccurrences) return false; termOccurrences.put(lowerCasedTerm, occurrences + 1); return true; } }
How about no `deployment.xml` at all?
void testDevDeployment() { ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(DeploymentSpec.empty.xmlForm()); var context = tester.newDeploymentContext(); ZoneId zone = ZoneId.from("dev", "us-east-1"); tester.controllerTester().zoneRegistry() .setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4); context.runJob(zone, applicationPackage); assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(), "Application deployed and activated"); assertTrue(context.instanceJobs().isEmpty(), "No job status added"); assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored"); Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone)) .asList() .stream() .map(Endpoint::routingMethod) .collect(Collectors.toSet()); assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4)); assertNotNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(new DeploymentId(context.instanceId(), zone)) .get(tester.clock().instant())); tester.clock().advance(Duration.ofSeconds(1)); tester.controller().applications().deactivate(context.instanceId(), zone); assertArrayEquals(new byte[0], tester.controllerTester().serviceRegistry().applicationStore() .getMeta(new DeploymentId(context.instanceId(), zone)) .get(tester.clock().instant())); }
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(DeploymentSpec.empty.xmlForm());
void testDevDeployment() { ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]); var context = tester.newDeploymentContext(); ZoneId zone = ZoneId.from("dev", "us-east-1"); tester.controllerTester().zoneRegistry() .setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4); context.runJob(zone, applicationPackage); assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(), "Application deployed and activated"); assertTrue(context.instanceJobs().isEmpty(), "No job status added"); assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored"); Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone)) .asList() .stream() .map(Endpoint::routingMethod) .collect(Collectors.toSet()); assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4)); assertNotNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(new DeploymentId(context.instanceId(), zone)) .get(tester.clock().instant())); tester.clock().advance(Duration.ofSeconds(1)); tester.controller().applications().deactivate(context.instanceId(), zone); assertArrayEquals(new byte[0], tester.controllerTester().serviceRegistry().applicationStore() .getMeta(new DeploymentId(context.instanceId(), zone)) .get(tester.clock().instant())); }
class ControllerTest { private final DeploymentTester tester = new DeploymentTester(); @Test void testDeployment() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .explicitEnvironment(Environment.dev, Environment.perf) .region("us-west-1") .region("us-east-3") .build(); Version version1 = tester.configServer().initialVersion(); var context = tester.newDeploymentContext(); context.submit(applicationPackage); assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)), context.application().revisions().get(context.instance().change().revision().get()), "Application version is known from completion of initial job"); context.runJob(systemTest); context.runJob(stagingTest); RevisionId applicationVersion = context.instance().change().revision().get(); assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment"); tester.triggerJobs(); tester.clock().advance(Duration.ofSeconds(1)); context.timeOutUpgrade(productionUsWest1); assertEquals(4, context.instanceJobs().size()); tester.triggerJobs(); tester.controllerTester().createNewController(); assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1"))); assertNotNull(tester.controller().applications().requireInstance(context.instanceId())); context.submit(applicationPackage); context.runJob(systemTest); context.runJob(stagingTest); context.triggerJobs().jobAborted(productionUsWest1); context.runJob(productionUsWest1); tester.triggerJobs(); context.runJob(productionUsEast3); assertEquals(4, context.instanceJobs().size()); applicationPackage = new ApplicationPackageBuilder() .instances("hellO") .build(); try { context.submit(applicationPackage); fail("Expected exception due to illegal deployment spec."); } catch (IllegalArgumentException e) { assertEquals("Invalid id 'hellO'. 
Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage()); } applicationPackage = new ApplicationPackageBuilder() .region("deep-space-9") .build(); try { context.submit(applicationPackage); fail("Expected exception due to illegal deployment spec."); } catch (IllegalArgumentException e) { assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage()); } applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .build(); try { assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1"))); context.submit(applicationPackage); fail("Expected exception due to illegal production deployment removal"); } catch (IllegalArgumentException e) { assertEquals("deployment-removal: application 'tenant.application' is deployed in us-west-1, but does not include this zone in deployment.xml. 
" + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval), e.getMessage()); } assertNotNull(context.instance().deployments().get(productionUsWest1.zone()), "Zone was not removed"); applicationPackage = new ApplicationPackageBuilder() .allow(ValidationId.deploymentRemoval) .upgradePolicy("default") .region("us-east-3") .build(); context.submit(applicationPackage); assertNull(context.instance().deployments().get(productionUsWest1.zone()), "Zone was removed"); assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed"); assertNotNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.instanceId()) .get(tester.clock().instant())); tester.clock().advance(Duration.ofSeconds(1)); context.submit(ApplicationPackage.deploymentRemoval()); tester.clock().advance(Duration.ofSeconds(1)); context.submit(ApplicationPackage.deploymentRemoval()); tester.applications().deleteApplication(context.application().id(), tester.controllerTester().credentialsFor(context.instanceId().tenant())); assertArrayEquals(new byte[0], tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.instanceId()) .get(tester.clock().instant())); assertNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.deploymentIdIn(productionUsWest1.zone()))); } @Test void testGlobalRotationStatus() { var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() .region(zone1.region()) .region(zone2.region()) .endpoint("default", "default", zone1.region().value(), zone2.region().value()) .build(); context.submit(applicationPackage).deploy(); var deployment1 = context.deploymentIdIn(zone1); DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1); RoutingStatus status1 = routingContext.routingStatus(); assertEquals(RoutingStatus.Value.in, 
status1.value()); routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator); RoutingStatus status2 = routingContext.routingStatus(); assertEquals(RoutingStatus.Value.out, status2.value()); RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus(); assertEquals(RoutingStatus.Value.in, status3.value()); } @Test void testDnsUpdatesForGlobalEndpoint() { var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta"); var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default"); ZoneId usWest = ZoneId.from("prod.us-west-1"); ZoneId usCentral = ZoneId.from("prod.us-central-1"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("beta,default") .endpoint("default", "foo") .region(usWest.region()) .region(usCentral.region()) .build(); tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)), RoutingMethod.sharedLayer4); betaContext.submit(applicationPackage).deploy(); { Collection<Deployment> betaDeployments = betaContext.instance().deployments().values(); assertFalse(betaDeployments.isEmpty()); Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", "global", List.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-id-01"), OptionalInt.empty(), RoutingMethod.sharedLayer4)); for (Deployment deployment : betaDeployments) { assertEquals(containerEndpoints, tester.configServer().containerEndpoints() .get(betaContext.deploymentIdIn(deployment.zone()))); } betaContext.flushDnsUpdates(); } { Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values(); assertFalse(defaultDeployments.isEmpty()); Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", "global", List.of("app1.tenant1.global.vespa.oath.cloud", "rotation-id-02"), OptionalInt.empty(), RoutingMethod.sharedLayer4)); for (Deployment 
deployment : defaultDeployments) { assertEquals(containerEndpoints, tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone()))); } defaultContext.flushDnsUpdates(); } Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.", "app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02."); rotationCnames.forEach((cname, data) -> { var record = tester.controllerTester().findCname(cname); assertTrue(record.isPresent()); assertEquals(cname, record.get().name().asString()); assertEquals(data, record.get().data().asString()); }); Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"), defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud")); globalDnsNamesByInstance.forEach((instance, dnsNames) -> { Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance) .scope(Endpoint.Scope.global) .asList().stream() .map(Endpoint::dnsName) .collect(Collectors.toSet()); assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance); }); } @Test void testDnsUpdatesForGlobalEndpointLegacySyntax() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .globalServiceId("foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); Collection<Deployment> deployments = context.instance().deployments().values(); assertFalse(deployments.isEmpty()); for (Deployment deployment : deployments) { assertEquals(Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())), "Rotation names are passed to config server in " + deployment.zone()); } context.flushDnsUpdates(); assertEquals(1, 
tester.controllerTester().nameService().records().size()); Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud"); assertTrue(record.isPresent()); assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId()) .scope(Endpoint.Scope.global) .sortedBy(Comparator.comparing(Endpoint::dnsName)) .mapToList(Endpoint::dnsName); assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"), globalDnsNames); } @Test void testDnsUpdatesForMultipleGlobalEndpoints() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("foobar", "qrs", "us-west-1", "us-central-1") .endpoint("default", "qrs", "us-west-1", "us-central-1") .endpoint("all", "qrs") .endpoint("west", "qrs", "us-west-1") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); Collection<Deployment> deployments = context.instance().deployments().values(); assertFalse(deployments.isEmpty()); var notWest = Set.of( "rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud", "rotation-id-02", "app1.tenant1.global.vespa.oath.cloud", "rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud" ); var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud")); for (Deployment deployment : deployments) { assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? 
west : notWest, tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())), "Rotation names are passed to config server in " + deployment.zone()); } context.flushDnsUpdates(); assertEquals(4, tester.controllerTester().nameService().records().size()); var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud"); assertTrue(record1.isPresent()); assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString()); assertEquals("rotation-fqdn-02.", record1.get().data().asString()); var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record2.isPresent()); assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString()); assertEquals("rotation-fqdn-01.", record2.get().data().asString()); var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record3.isPresent()); assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString()); assertEquals("rotation-fqdn-03.", record3.get().data().asString()); var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record4.isPresent()); assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString()); assertEquals("rotation-fqdn-04.", record4.get().data().asString()); } @Test void testDnsUpdatesForGlobalEndpointChanges() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); var west = ZoneId.from("prod", "us-west-1"); var central = ZoneId.from("prod", "us-central-1"); var east = ZoneId.from("prod", "us-east-3"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage).deploy(); 
for (var zone : List.of(west, central)) { assertEquals( Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage2).deploy(); for (var zone : List.of(west, central)) { assertEquals( Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } assertEquals( Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(east)) , "Zone " + east + " is a member of global endpoint"); ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage3).deploy(); for (var zone : List.of(west, central, east)) { assertEquals( zone.equals(east) ? 
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud", "rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud") : Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage4); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " + "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " + "but does not include all of these in deployment.xml. Deploying given deployment.xml " + "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " + "and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " + ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage()); } ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder() .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage5); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " + "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " + "but does not include all of these in deployment.xml. 
Deploying given deployment.xml " + "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " + ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage()); } ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder() .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage6); } @Test void testUnassignRotations() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "qrs", "us-west-1", "us-central-1") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder() .region("us-west-1") .region("us-central-1") .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage2).deploy(); assertEquals(List.of(), context.instance().rotations()); assertEquals( Set.of(), tester.configServer().containerEndpoints().get(context.deploymentIdIn(ZoneId.from("prod", "us-west-1"))) ); } @Test void testDnsUpdatesWithChangeInRotationAssignment() { String dnsName1 = "app1.tenant1.global.vespa.oath.cloud"; { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals(1, tester.controllerTester().nameService().records().size()); { Optional<Record> record = tester.controllerTester().findCname(dnsName1); assertTrue(record.isPresent()); assertEquals(dnsName1, record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); } applicationPackage = new ApplicationPackageBuilder() 
.allow(ValidationId.deploymentRemoval) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage); tester.applications().deleteApplication(context.application().id(), tester.controllerTester().credentialsFor(context.application().id().tenant())); try (RotationLock lock = tester.controller().routing().rotations().lock()) { assertTrue(tester.controller().routing().rotations().availableRotations(lock) .containsKey(new RotationId("rotation-id-01")), "Rotation is unassigned"); } context.flushDnsUpdates(); Optional<Record> record = tester.controllerTester().findCname(dnsName1); assertTrue(record.isEmpty(), dnsName1 + " is removed"); } String dnsName2 = "app2.tenant2.global.vespa.oath.cloud"; { var context = tester.newDeploymentContext("tenant2", "app2", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals(1, tester.controllerTester().nameService().records().size()); var record = tester.controllerTester().findCname(dnsName2); assertTrue(record.isPresent()); assertEquals(dnsName2, record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); } { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString()); assertEquals(2, tester.controllerTester().nameService().records().size()); var record1 = tester.controllerTester().findCname(dnsName1); assertTrue(record1.isPresent()); assertEquals("rotation-fqdn-02.", record1.get().data().asString()); var record2 = tester.controllerTester().findCname(dnsName2); assertTrue(record2.isPresent()); 
assertEquals("rotation-fqdn-01.", record2.get().data().asString()); } } @Test void testDnsUpdatesForApplicationEndpoint() { ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta"); ApplicationId main = ApplicationId.from("tenant1", "app1", "main"); var context = tester.newDeploymentContext(beta); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("beta,main") .region("us-west-1") .region("us-east-3") .applicationEndpoint("a", "default", "us-west-1", Map.of(beta.instance(), 2, main.instance(), 8)) .applicationEndpoint("b", "default", "us-west-1", Map.of(beta.instance(), 1, main.instance(), 1)) .applicationEndpoint("c", "default", "us-east-3", Map.of(beta.instance(), 4, main.instance(), 6)) .build(); context.submit(applicationPackage).deploy(); ZoneId usWest = ZoneId.from("prod", "us-west-1"); ZoneId usEast = ZoneId.from("prod", "us-east-3"); Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of( new DeploymentId(beta, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 2, "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1), new DeploymentId(main, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 8, "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1), new DeploymentId(beta, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 4), new DeploymentId(main, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 6) ); deploymentEndpoints.forEach((deployment, endpoints) -> { Set<ContainerEndpoint> expected = endpoints.entrySet().stream() .map(kv -> new ContainerEndpoint("default", "application", List.of(kv.getKey()), OptionalInt.of(kv.getValue()), RoutingMethod.sharedLayer4)) .collect(Collectors.toSet()); assertEquals(expected, tester.configServer().containerEndpoints().get(deployment), "Endpoint names for " + deployment + " are passed to config server"); }); context.flushDnsUpdates(); Set<Record> records = tester.controllerTester().nameService().records(); 
assertEquals(Set.of(new Record(Record.Type.CNAME, RecordName.from("a.app1.tenant1.us-west-1-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-west-1.")), new Record(Record.Type.CNAME, RecordName.from("b.app1.tenant1.us-west-1-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-west-1.")), new Record(Record.Type.CNAME, RecordName.from("c.app1.tenant1.us-east-3-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-east-3."))), records); List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application()) .scope(Endpoint.Scope.application) .mapToList(Endpoint::dnsName); assertEquals(List.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", "c.app1.tenant1.us-east-3-r.vespa.oath.cloud"), endpointDnsNames); } @Test @Test void testDevDeploymentWithIncompatibleVersions() { Version version1 = new Version("7"); Version version2 = new Version("7.5"); Version version3 = new Version("8"); var context = tester.newDeploymentContext(); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class); tester.controllerTester().upgradeSystem(version2); ZoneId zone = ZoneId.from("dev", "us-east-1"); context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build()); assertEquals(version2, context.deployment(zone).version()); assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build()); fail("Should fail when specifying a major that does not yet exist"); } catch (IllegalArgumentException e) { assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage()); } try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build()); fail("Should fail when compiled against a version which is only 
compatible with not-yet-existent versions"); } catch (IllegalArgumentException e) { assertEquals("no platforms are compatible with compile version 8", e.getMessage()); } tester.controllerTester().upgradeSystem(version3); try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build()); fail("Should fail when specifying a major which is incompatible with compile version"); } catch (IllegalArgumentException e) { assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage()); } context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build()); assertEquals(version3, context.deployment(zone).version()); assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build()); assertEquals(version3, context.deployment(zone).version()); assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); } @Test void testSuspension() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); context.submit(applicationPackage).deploy(); DeploymentId deployment1 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1"))); DeploymentId deployment2 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3"))); assertFalse(tester.configServer().isSuspended(deployment1)); assertFalse(tester.configServer().isSuspended(deployment2)); tester.configServer().setSuspension(deployment1, true); assertTrue(tester.configServer().isSuspended(deployment1)); assertFalse(tester.configServer().isSuspended(deployment2)); } @Test void testDeletingApplicationThatHasAlreadyBeenDeleted() { 
var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); ZoneId zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1")); context.submit(applicationPackage).runJob(zone, applicationPackage); tester.controller().applications().deactivate(context.instanceId(), zone); tester.controller().applications().deactivate(context.instanceId(), zone); } @Test void testDeployApplicationWithWarnings() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); ZoneId zone = ZoneId.from("prod", "us-west-1"); int warnings = 3; tester.configServer().generateWarnings(context.deploymentIdIn(zone), warnings); context.submit(applicationPackage).deploy(); assertEquals(warnings, context.deployment(zone) .metrics().warnings().get(DeploymentMetrics.Warning.all).intValue()); } @Test void testDeploySelectivelyProvisionsCertificate() { Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id()); var context1 = tester.newDeploymentContext("tenant1", "app1", "default"); var prodZone = ZoneId.from("prod", "us-west-1"); var stagingZone = ZoneId.from("staging", "us-east-3"); var testZone = ZoneId.from("test", "us-east-1"); tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone)); var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")) .region(prodZone.region()) .build(); context1.submit(applicationPackage).deploy(); var cert = certificate.apply(context1.instance()); assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod); assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud", "app1.tenant1.global.vespa.oath.cloud", 
"*.app1.tenant1.global.vespa.oath.cloud"), Stream.of(prodZone, testZone, stagingZone) .flatMap(zone -> Stream.of("", "*.") .map(prefix -> prefix + "app1.tenant1." + zone.region().value() + (zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) + ".vespa.oath.cloud"))) .collect(Collectors.toUnmodifiableSet()), Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId()))); context1.submit(applicationPackage).deploy(); assertEquals(cert, certificate.apply(context1.instance())); var context2 = tester.newDeploymentContext("tenant1", "app2", "default"); var devZone = ZoneId.from("dev", "us-east-1"); context2.runJob(devZone, applicationPackage); assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(), "Application deployed and activated"); assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer"); } @Test void testDeployWithGlobalEndpointsInMultipleClouds() { tester.controllerTester().zoneRegistry().setZones( ZoneApiMock.fromId("test.us-west-1"), ZoneApiMock.fromId("staging.us-west-1"), ZoneApiMock.fromId("prod.us-west-1"), ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build() ); var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .region("aws-us-east-1") .region("us-west-1") .endpoint("default", "default") .build(); try { context.submit(applicationPackage); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage()); } var applicationPackage2 = new ApplicationPackageBuilder() .region("aws-us-east-1") .region("us-west-1") .endpoint("aws", "default", "aws-us-east-1") .endpoint("foo", "default", "aws-us-east-1", "us-west-1") .build(); try { 
context.submit(applicationPackage2); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage()); } } @Test void testDeployWithGlobalEndpointsInGcp() { tester.controllerTester().zoneRegistry().setZones( ZoneApiMock.fromId("test.us-west-1"), ZoneApiMock.fromId("staging.us-west-1"), ZoneApiMock.newBuilder().with(CloudName.GCP).withId("prod.gcp-us-east1-b").build() ); var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .region("gcp-us-east1-b") .endpoint("default", "default") .build(); try { context.submit(applicationPackage); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'default' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage()); } var applicationPackage2 = new ApplicationPackageBuilder() .region("gcp-us-east1-b") .endpoint("gcp", "default", "gcp-us-east1-b") .build(); try { context.submit(applicationPackage2); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'gcp' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage()); } } @Test void testDeployWithoutSourceRevision() { var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .region("us-west-1") .build(); context.submit(applicationPackage, Optional.empty()) .deploy(); assertEquals(1, context.instance().deployments().size(), "Deployed application"); } @Test void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() { var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() 
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")) .endpoint("default", "default", zone1.region().value(), zone2.region().value()) .endpoint("east", "default", zone2.region().value()) .region(zone1.region()) .region(zone2.region()) .build(); tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4); tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive); context.submit(applicationPackage).deploy(); var expectedRecords = List.of( new Record(Record.Type.ALIAS, RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"), new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"), "dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()), new Record(Record.Type.ALIAS, RecordName.from("east.application.tenant.global.vespa.oath.cloud"), new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"), "dns-zone-1", ZoneId.from("prod.us-east-3")).pack()), new Record(Record.Type.CNAME, RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"), RecordData.from("lb-0--tenant.application.default--prod.us-east-3."))); assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records())); } @Test void testDeploymentDirectRouting() { DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main)); var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var zone3 = ZoneId.from("prod", "eu-west-1"); tester.controllerTester().zoneRegistry() .exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3)); var applicationPackageBuilder = new ApplicationPackageBuilder() .region(zone1.region()) .region(zone2.region()) .region(zone3.region()) .endpoint("default", "default") .endpoint("foo", "qrs") .endpoint("us", 
"default", zone1.region().value(), zone2.region().value()) .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")); context.submit(applicationPackageBuilder.build()).deploy(); for (var zone : List.of(zone1, zone2)) { assertEquals(Set.of("application.tenant.global.vespa.oath.cloud", "foo.application.tenant.global.vespa.oath.cloud", "us.application.tenant.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)), "Expected container endpoints in " + zone); } assertEquals(Set.of("application.tenant.global.vespa.oath.cloud", "foo.application.tenant.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)), "Expected container endpoints in " + zone3); } @Test void testChangeEndpointCluster() { var context = tester.newDeploymentContext(); var west = ZoneId.from("prod", "us-west-1"); var east = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region(west.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage).deploy(); assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId()) .rotations().get(0).clusterId()); applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "bar") .region(west.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage).deploy(); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " + "'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " + "deployment.xml. Deploying given deployment.xml will remove " + "[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " + "[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. 
To allow this add " + "<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " + "https: } applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "bar") .region(west.region().value()) .region(east.region().value()) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage).deploy(); assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId()) .rotations().get(0).clusterId()); } @Test void testReadableApplications() { var db = new MockCuratorDb(tester.controller().system()); var tester = new DeploymentTester(new ControllerTester(db)); var app1 = tester.newDeploymentContext("t1", "a1", "default") .submit() .deploy(); var app2 = tester.newDeploymentContext("t2", "a2", "default") .submit() .deploy(); assertEquals(2, tester.applications().readable().size()); db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()), new byte[]{(byte) 0xDE, (byte) 0xAD}); assertEquals(1, tester.applications().readable().size()); try { tester.applications().asList(); fail("Expected exception"); } catch (Exception ignored) { } app1.submit().deploy(); } @Test void testClashingEndpointIdAndInstanceName() { String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <instance id=\"default\">\n" + " <prod>\n" + " <region active=\"true\">us-west-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id=\"dev\" container-id=\"qrs\"/>\n" + " </endpoints>\n" + " </instance>\n" + " <instance id=\"dev\">\n" + " <prod>\n" + " <region active=\"true\">us-west-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id=\"default\" container-id=\"qrs\"/>\n" + " </endpoints>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml); try { tester.newDeploymentContext().submit(applicationPackage); 
fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'", e.getMessage()); } } @Test void testTestPackageWarnings() { String deploymentXml = "<deployment version='1.0'>\n" + " <prod>\n" + " <region>us-west-1</region>\n" + " </prod>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml); byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0])); var app = tester.newDeploymentContext(); tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1); assertEquals(List.of(new Notification(tester.clock().instant(), Type.testPackage, Level.warning, NotificationSource.from(app.application().id()), List.of("test package has staging tests, so it should also include staging setup", "see https: tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true)); } @Test void testCompileVersion() { DeploymentContext context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build(); TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId()); Version version0 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version0); tester.upgrader().overrideConfidence(version0, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); 
context.submit(applicationPackage).deploy(); Version version1 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().overrideConfidence(version1, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty())); tester.upgrader().maintain(); context.deployPlatform(version1); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy(); TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app"); Version version2 = Version.fromString("8.0"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8))); assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, 
OptionalInt.of(8))); assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8))); tester.upgrader().overrideConfidence(version2, Confidence.broken); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals("no suitable, released compile version exists", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, 
OptionalInt.empty())) .getMessage()); assertEquals("no suitable, released compile version exists for specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8))); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8))); } @Test void testCloudAccount() { DeploymentContext context = tester.newDeploymentContext(); ZoneId devZone = devUsEast1.zone(); ZoneId prodZone = productionUsWest1.zone(); String cloudAccount = "012345678912"; var applicationPackage = new ApplicationPackageBuilder() .cloudAccount(cloudAccount) .region(prodZone.region()) .build(); context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'"); tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class); context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'") .abortJob(stagingTest); tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), systemTest.zone(), stagingTest.zone(), prodZone); 
context.submit(applicationPackage).deploy(); tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), devZone); context.runJob(devZone, applicationPackage); for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) { assertEquals(cloudAccount, tester.controllerTester().configServer() .cloudAccount(context.deploymentIdIn(zoneId)) .get().value()); } } @Test void testSubmitWithElementDeprecatedOnPreviousMajor() { DeploymentContext context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .compileVersion(Version.fromString("8.1")) .region("us-west-1") .globalServiceId("qrs") .build(); try { context.submit(applicationPackage).deploy(); fail("Expected exception"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7")); } } }
class ControllerTest { private final DeploymentTester tester = new DeploymentTester(); @Test void testDeployment() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .explicitEnvironment(Environment.dev, Environment.perf) .region("us-west-1") .region("us-east-3") .build(); Version version1 = tester.configServer().initialVersion(); var context = tester.newDeploymentContext(); context.submit(applicationPackage); assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)), context.application().revisions().get(context.instance().change().revision().get()), "Application version is known from completion of initial job"); context.runJob(systemTest); context.runJob(stagingTest); RevisionId applicationVersion = context.instance().change().revision().get(); assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment"); tester.triggerJobs(); tester.clock().advance(Duration.ofSeconds(1)); context.timeOutUpgrade(productionUsWest1); assertEquals(4, context.instanceJobs().size()); tester.triggerJobs(); tester.controllerTester().createNewController(); assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1"))); assertNotNull(tester.controller().applications().requireInstance(context.instanceId())); context.submit(applicationPackage); context.runJob(systemTest); context.runJob(stagingTest); context.triggerJobs().jobAborted(productionUsWest1); context.runJob(productionUsWest1); tester.triggerJobs(); context.runJob(productionUsEast3); assertEquals(4, context.instanceJobs().size()); applicationPackage = new ApplicationPackageBuilder() .instances("hellO") .build(); try { context.submit(applicationPackage); fail("Expected exception due to illegal deployment spec."); } catch (IllegalArgumentException e) { assertEquals("Invalid id 'hellO'. 
Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage()); } applicationPackage = new ApplicationPackageBuilder() .region("deep-space-9") .build(); try { context.submit(applicationPackage); fail("Expected exception due to illegal deployment spec."); } catch (IllegalArgumentException e) { assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage()); } applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .build(); try { assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1"))); context.submit(applicationPackage); fail("Expected exception due to illegal production deployment removal"); } catch (IllegalArgumentException e) { assertEquals("deployment-removal: application 'tenant.application' is deployed in us-west-1, but does not include this zone in deployment.xml. 
" + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval), e.getMessage()); } assertNotNull(context.instance().deployments().get(productionUsWest1.zone()), "Zone was not removed"); applicationPackage = new ApplicationPackageBuilder() .allow(ValidationId.deploymentRemoval) .upgradePolicy("default") .region("us-east-3") .build(); context.submit(applicationPackage); assertNull(context.instance().deployments().get(productionUsWest1.zone()), "Zone was removed"); assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed"); assertNotNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.instanceId()) .get(tester.clock().instant())); tester.clock().advance(Duration.ofSeconds(1)); context.submit(ApplicationPackage.deploymentRemoval()); tester.clock().advance(Duration.ofSeconds(1)); context.submit(ApplicationPackage.deploymentRemoval()); tester.applications().deleteApplication(context.application().id(), tester.controllerTester().credentialsFor(context.instanceId().tenant())); assertArrayEquals(new byte[0], tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.instanceId()) .get(tester.clock().instant())); assertNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.deploymentIdIn(productionUsWest1.zone()))); } @Test void testGlobalRotationStatus() { var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() .region(zone1.region()) .region(zone2.region()) .endpoint("default", "default", zone1.region().value(), zone2.region().value()) .build(); context.submit(applicationPackage).deploy(); var deployment1 = context.deploymentIdIn(zone1); DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1); RoutingStatus status1 = routingContext.routingStatus(); assertEquals(RoutingStatus.Value.in, 
status1.value()); routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator); RoutingStatus status2 = routingContext.routingStatus(); assertEquals(RoutingStatus.Value.out, status2.value()); RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus(); assertEquals(RoutingStatus.Value.in, status3.value()); } @Test void testDnsUpdatesForGlobalEndpoint() { var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta"); var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default"); ZoneId usWest = ZoneId.from("prod.us-west-1"); ZoneId usCentral = ZoneId.from("prod.us-central-1"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("beta,default") .endpoint("default", "foo") .region(usWest.region()) .region(usCentral.region()) .build(); tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)), RoutingMethod.sharedLayer4); betaContext.submit(applicationPackage).deploy(); { Collection<Deployment> betaDeployments = betaContext.instance().deployments().values(); assertFalse(betaDeployments.isEmpty()); Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", "global", List.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-id-01"), OptionalInt.empty(), RoutingMethod.sharedLayer4)); for (Deployment deployment : betaDeployments) { assertEquals(containerEndpoints, tester.configServer().containerEndpoints() .get(betaContext.deploymentIdIn(deployment.zone()))); } betaContext.flushDnsUpdates(); } { Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values(); assertFalse(defaultDeployments.isEmpty()); Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", "global", List.of("app1.tenant1.global.vespa.oath.cloud", "rotation-id-02"), OptionalInt.empty(), RoutingMethod.sharedLayer4)); for (Deployment 
deployment : defaultDeployments) { assertEquals(containerEndpoints, tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone()))); } defaultContext.flushDnsUpdates(); } Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.", "app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02."); rotationCnames.forEach((cname, data) -> { var record = tester.controllerTester().findCname(cname); assertTrue(record.isPresent()); assertEquals(cname, record.get().name().asString()); assertEquals(data, record.get().data().asString()); }); Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"), defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud")); globalDnsNamesByInstance.forEach((instance, dnsNames) -> { Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance) .scope(Endpoint.Scope.global) .asList().stream() .map(Endpoint::dnsName) .collect(Collectors.toSet()); assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance); }); } @Test void testDnsUpdatesForGlobalEndpointLegacySyntax() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .globalServiceId("foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); Collection<Deployment> deployments = context.instance().deployments().values(); assertFalse(deployments.isEmpty()); for (Deployment deployment : deployments) { assertEquals(Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())), "Rotation names are passed to config server in " + deployment.zone()); } context.flushDnsUpdates(); assertEquals(1, 
tester.controllerTester().nameService().records().size()); Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud"); assertTrue(record.isPresent()); assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId()) .scope(Endpoint.Scope.global) .sortedBy(Comparator.comparing(Endpoint::dnsName)) .mapToList(Endpoint::dnsName); assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"), globalDnsNames); } @Test void testDnsUpdatesForMultipleGlobalEndpoints() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("foobar", "qrs", "us-west-1", "us-central-1") .endpoint("default", "qrs", "us-west-1", "us-central-1") .endpoint("all", "qrs") .endpoint("west", "qrs", "us-west-1") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); Collection<Deployment> deployments = context.instance().deployments().values(); assertFalse(deployments.isEmpty()); var notWest = Set.of( "rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud", "rotation-id-02", "app1.tenant1.global.vespa.oath.cloud", "rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud" ); var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud")); for (Deployment deployment : deployments) { assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? 
west : notWest, tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())), "Rotation names are passed to config server in " + deployment.zone()); } context.flushDnsUpdates(); assertEquals(4, tester.controllerTester().nameService().records().size()); var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud"); assertTrue(record1.isPresent()); assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString()); assertEquals("rotation-fqdn-02.", record1.get().data().asString()); var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record2.isPresent()); assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString()); assertEquals("rotation-fqdn-01.", record2.get().data().asString()); var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record3.isPresent()); assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString()); assertEquals("rotation-fqdn-03.", record3.get().data().asString()); var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record4.isPresent()); assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString()); assertEquals("rotation-fqdn-04.", record4.get().data().asString()); } @Test void testDnsUpdatesForGlobalEndpointChanges() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); var west = ZoneId.from("prod", "us-west-1"); var central = ZoneId.from("prod", "us-central-1"); var east = ZoneId.from("prod", "us-east-3"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage).deploy(); 
for (var zone : List.of(west, central)) { assertEquals( Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage2).deploy(); for (var zone : List.of(west, central)) { assertEquals( Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } assertEquals( Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(east)) , "Zone " + east + " is a member of global endpoint"); ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage3).deploy(); for (var zone : List.of(west, central, east)) { assertEquals( zone.equals(east) ? 
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud", "rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud") : Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage4); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " + "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " + "but does not include all of these in deployment.xml. Deploying given deployment.xml " + "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " + "and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " + ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage()); } ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder() .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage5); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " + "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " + "but does not include all of these in deployment.xml. 
Deploying given deployment.xml " + "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " + ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage()); } ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder() .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage6); } @Test void testUnassignRotations() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "qrs", "us-west-1", "us-central-1") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder() .region("us-west-1") .region("us-central-1") .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage2).deploy(); assertEquals(List.of(), context.instance().rotations()); assertEquals( Set.of(), tester.configServer().containerEndpoints().get(context.deploymentIdIn(ZoneId.from("prod", "us-west-1"))) ); } @Test void testDnsUpdatesWithChangeInRotationAssignment() { String dnsName1 = "app1.tenant1.global.vespa.oath.cloud"; { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals(1, tester.controllerTester().nameService().records().size()); { Optional<Record> record = tester.controllerTester().findCname(dnsName1); assertTrue(record.isPresent()); assertEquals(dnsName1, record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); } applicationPackage = new ApplicationPackageBuilder() 
.allow(ValidationId.deploymentRemoval) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage); tester.applications().deleteApplication(context.application().id(), tester.controllerTester().credentialsFor(context.application().id().tenant())); try (RotationLock lock = tester.controller().routing().rotations().lock()) { assertTrue(tester.controller().routing().rotations().availableRotations(lock) .containsKey(new RotationId("rotation-id-01")), "Rotation is unassigned"); } context.flushDnsUpdates(); Optional<Record> record = tester.controllerTester().findCname(dnsName1); assertTrue(record.isEmpty(), dnsName1 + " is removed"); } String dnsName2 = "app2.tenant2.global.vespa.oath.cloud"; { var context = tester.newDeploymentContext("tenant2", "app2", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals(1, tester.controllerTester().nameService().records().size()); var record = tester.controllerTester().findCname(dnsName2); assertTrue(record.isPresent()); assertEquals(dnsName2, record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); } { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString()); assertEquals(2, tester.controllerTester().nameService().records().size()); var record1 = tester.controllerTester().findCname(dnsName1); assertTrue(record1.isPresent()); assertEquals("rotation-fqdn-02.", record1.get().data().asString()); var record2 = tester.controllerTester().findCname(dnsName2); assertTrue(record2.isPresent()); 
assertEquals("rotation-fqdn-01.", record2.get().data().asString()); } } @Test void testDnsUpdatesForApplicationEndpoint() { ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta"); ApplicationId main = ApplicationId.from("tenant1", "app1", "main"); var context = tester.newDeploymentContext(beta); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("beta,main") .region("us-west-1") .region("us-east-3") .applicationEndpoint("a", "default", "us-west-1", Map.of(beta.instance(), 2, main.instance(), 8)) .applicationEndpoint("b", "default", "us-west-1", Map.of(beta.instance(), 1, main.instance(), 1)) .applicationEndpoint("c", "default", "us-east-3", Map.of(beta.instance(), 4, main.instance(), 6)) .build(); context.submit(applicationPackage).deploy(); ZoneId usWest = ZoneId.from("prod", "us-west-1"); ZoneId usEast = ZoneId.from("prod", "us-east-3"); Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of( new DeploymentId(beta, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 2, "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1), new DeploymentId(main, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 8, "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1), new DeploymentId(beta, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 4), new DeploymentId(main, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 6) ); deploymentEndpoints.forEach((deployment, endpoints) -> { Set<ContainerEndpoint> expected = endpoints.entrySet().stream() .map(kv -> new ContainerEndpoint("default", "application", List.of(kv.getKey()), OptionalInt.of(kv.getValue()), RoutingMethod.sharedLayer4)) .collect(Collectors.toSet()); assertEquals(expected, tester.configServer().containerEndpoints().get(deployment), "Endpoint names for " + deployment + " are passed to config server"); }); context.flushDnsUpdates(); Set<Record> records = tester.controllerTester().nameService().records(); 
assertEquals(Set.of(new Record(Record.Type.CNAME, RecordName.from("a.app1.tenant1.us-west-1-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-west-1.")), new Record(Record.Type.CNAME, RecordName.from("b.app1.tenant1.us-west-1-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-west-1.")), new Record(Record.Type.CNAME, RecordName.from("c.app1.tenant1.us-east-3-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-east-3."))), records); List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application()) .scope(Endpoint.Scope.application) .mapToList(Endpoint::dnsName); assertEquals(List.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", "c.app1.tenant1.us-east-3-r.vespa.oath.cloud"), endpointDnsNames); } @Test @Test void testDevDeploymentWithIncompatibleVersions() { Version version1 = new Version("7"); Version version2 = new Version("7.5"); Version version3 = new Version("8"); var context = tester.newDeploymentContext(); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class); tester.controllerTester().upgradeSystem(version2); ZoneId zone = ZoneId.from("dev", "us-east-1"); context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build()); assertEquals(version2, context.deployment(zone).version()); assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build()); fail("Should fail when specifying a major that does not yet exist"); } catch (IllegalArgumentException e) { assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage()); } try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build()); fail("Should fail when compiled against a version which is only 
compatible with not-yet-existent versions"); } catch (IllegalArgumentException e) { assertEquals("no platforms are compatible with compile version 8", e.getMessage()); } tester.controllerTester().upgradeSystem(version3); try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build()); fail("Should fail when specifying a major which is incompatible with compile version"); } catch (IllegalArgumentException e) { assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage()); } context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build()); assertEquals(version3, context.deployment(zone).version()); assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build()); assertEquals(version3, context.deployment(zone).version()); assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); } @Test void testSuspension() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); context.submit(applicationPackage).deploy(); DeploymentId deployment1 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1"))); DeploymentId deployment2 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3"))); assertFalse(tester.configServer().isSuspended(deployment1)); assertFalse(tester.configServer().isSuspended(deployment2)); tester.configServer().setSuspension(deployment1, true); assertTrue(tester.configServer().isSuspended(deployment1)); assertFalse(tester.configServer().isSuspended(deployment2)); } @Test void testDeletingApplicationThatHasAlreadyBeenDeleted() { 
var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); ZoneId zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1")); context.submit(applicationPackage).runJob(zone, applicationPackage); tester.controller().applications().deactivate(context.instanceId(), zone); tester.controller().applications().deactivate(context.instanceId(), zone); } @Test void testDeployApplicationWithWarnings() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); ZoneId zone = ZoneId.from("prod", "us-west-1"); int warnings = 3; tester.configServer().generateWarnings(context.deploymentIdIn(zone), warnings); context.submit(applicationPackage).deploy(); assertEquals(warnings, context.deployment(zone) .metrics().warnings().get(DeploymentMetrics.Warning.all).intValue()); } @Test void testDeploySelectivelyProvisionsCertificate() { Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id()); var context1 = tester.newDeploymentContext("tenant1", "app1", "default"); var prodZone = ZoneId.from("prod", "us-west-1"); var stagingZone = ZoneId.from("staging", "us-east-3"); var testZone = ZoneId.from("test", "us-east-1"); tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone)); var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")) .region(prodZone.region()) .build(); context1.submit(applicationPackage).deploy(); var cert = certificate.apply(context1.instance()); assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod); assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud", "app1.tenant1.global.vespa.oath.cloud", 
"*.app1.tenant1.global.vespa.oath.cloud"), Stream.of(prodZone, testZone, stagingZone) .flatMap(zone -> Stream.of("", "*.") .map(prefix -> prefix + "app1.tenant1." + zone.region().value() + (zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) + ".vespa.oath.cloud"))) .collect(Collectors.toUnmodifiableSet()), Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId()))); context1.submit(applicationPackage).deploy(); assertEquals(cert, certificate.apply(context1.instance())); var context2 = tester.newDeploymentContext("tenant1", "app2", "default"); var devZone = ZoneId.from("dev", "us-east-1"); context2.runJob(devZone, applicationPackage); assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(), "Application deployed and activated"); assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer"); } @Test void testDeployWithGlobalEndpointsInMultipleClouds() { tester.controllerTester().zoneRegistry().setZones( ZoneApiMock.fromId("test.us-west-1"), ZoneApiMock.fromId("staging.us-west-1"), ZoneApiMock.fromId("prod.us-west-1"), ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build() ); var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .region("aws-us-east-1") .region("us-west-1") .endpoint("default", "default") .build(); try { context.submit(applicationPackage); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage()); } var applicationPackage2 = new ApplicationPackageBuilder() .region("aws-us-east-1") .region("us-west-1") .endpoint("aws", "default", "aws-us-east-1") .endpoint("foo", "default", "aws-us-east-1", "us-west-1") .build(); try { 
context.submit(applicationPackage2); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage()); } } @Test void testDeployWithGlobalEndpointsInGcp() { tester.controllerTester().zoneRegistry().setZones( ZoneApiMock.fromId("test.us-west-1"), ZoneApiMock.fromId("staging.us-west-1"), ZoneApiMock.newBuilder().with(CloudName.GCP).withId("prod.gcp-us-east1-b").build() ); var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .region("gcp-us-east1-b") .endpoint("default", "default") .build(); try { context.submit(applicationPackage); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'default' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage()); } var applicationPackage2 = new ApplicationPackageBuilder() .region("gcp-us-east1-b") .endpoint("gcp", "default", "gcp-us-east1-b") .build(); try { context.submit(applicationPackage2); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'gcp' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage()); } } @Test void testDeployWithoutSourceRevision() { var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .region("us-west-1") .build(); context.submit(applicationPackage, Optional.empty()) .deploy(); assertEquals(1, context.instance().deployments().size(), "Deployed application"); } @Test void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() { var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() 
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")) .endpoint("default", "default", zone1.region().value(), zone2.region().value()) .endpoint("east", "default", zone2.region().value()) .region(zone1.region()) .region(zone2.region()) .build(); tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4); tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive); context.submit(applicationPackage).deploy(); var expectedRecords = List.of( new Record(Record.Type.ALIAS, RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"), new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"), "dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()), new Record(Record.Type.ALIAS, RecordName.from("east.application.tenant.global.vespa.oath.cloud"), new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"), "dns-zone-1", ZoneId.from("prod.us-east-3")).pack()), new Record(Record.Type.CNAME, RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"), RecordData.from("lb-0--tenant.application.default--prod.us-east-3."))); assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records())); } @Test void testDeploymentDirectRouting() { DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main)); var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var zone3 = ZoneId.from("prod", "eu-west-1"); tester.controllerTester().zoneRegistry() .exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3)); var applicationPackageBuilder = new ApplicationPackageBuilder() .region(zone1.region()) .region(zone2.region()) .region(zone3.region()) .endpoint("default", "default") .endpoint("foo", "qrs") .endpoint("us", 
"default", zone1.region().value(), zone2.region().value()) .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")); context.submit(applicationPackageBuilder.build()).deploy(); for (var zone : List.of(zone1, zone2)) { assertEquals(Set.of("application.tenant.global.vespa.oath.cloud", "foo.application.tenant.global.vespa.oath.cloud", "us.application.tenant.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)), "Expected container endpoints in " + zone); } assertEquals(Set.of("application.tenant.global.vespa.oath.cloud", "foo.application.tenant.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)), "Expected container endpoints in " + zone3); } @Test void testChangeEndpointCluster() { var context = tester.newDeploymentContext(); var west = ZoneId.from("prod", "us-west-1"); var east = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region(west.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage).deploy(); assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId()) .rotations().get(0).clusterId()); applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "bar") .region(west.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage).deploy(); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " + "'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " + "deployment.xml. Deploying given deployment.xml will remove " + "[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " + "[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. 
To allow this add " + "<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " + "https: } applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "bar") .region(west.region().value()) .region(east.region().value()) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage).deploy(); assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId()) .rotations().get(0).clusterId()); } @Test void testReadableApplications() { var db = new MockCuratorDb(tester.controller().system()); var tester = new DeploymentTester(new ControllerTester(db)); var app1 = tester.newDeploymentContext("t1", "a1", "default") .submit() .deploy(); var app2 = tester.newDeploymentContext("t2", "a2", "default") .submit() .deploy(); assertEquals(2, tester.applications().readable().size()); db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()), new byte[]{(byte) 0xDE, (byte) 0xAD}); assertEquals(1, tester.applications().readable().size()); try { tester.applications().asList(); fail("Expected exception"); } catch (Exception ignored) { } app1.submit().deploy(); } @Test void testClashingEndpointIdAndInstanceName() { String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <instance id=\"default\">\n" + " <prod>\n" + " <region active=\"true\">us-west-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id=\"dev\" container-id=\"qrs\"/>\n" + " </endpoints>\n" + " </instance>\n" + " <instance id=\"dev\">\n" + " <prod>\n" + " <region active=\"true\">us-west-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id=\"default\" container-id=\"qrs\"/>\n" + " </endpoints>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml); try { tester.newDeploymentContext().submit(applicationPackage); 
fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'", e.getMessage()); } } @Test void testTestPackageWarnings() { String deploymentXml = "<deployment version='1.0'>\n" + " <prod>\n" + " <region>us-west-1</region>\n" + " </prod>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml); byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0])); var app = tester.newDeploymentContext(); tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1); assertEquals(List.of(new Notification(tester.clock().instant(), Type.testPackage, Level.warning, NotificationSource.from(app.application().id()), List.of("test package has staging tests, so it should also include staging setup", "see https: tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true)); } @Test void testCompileVersion() { DeploymentContext context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build(); TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId()); Version version0 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version0); tester.upgrader().overrideConfidence(version0, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); 
context.submit(applicationPackage).deploy(); Version version1 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().overrideConfidence(version1, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty())); tester.upgrader().maintain(); context.deployPlatform(version1); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy(); TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app"); Version version2 = Version.fromString("8.0"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8))); assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, 
OptionalInt.of(8))); assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8))); tester.upgrader().overrideConfidence(version2, Confidence.broken); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals("no suitable, released compile version exists", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, 
OptionalInt.empty())) .getMessage()); assertEquals("no suitable, released compile version exists for specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8))); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8))); } @Test void testCloudAccount() { DeploymentContext context = tester.newDeploymentContext(); ZoneId devZone = devUsEast1.zone(); ZoneId prodZone = productionUsWest1.zone(); String cloudAccount = "012345678912"; var applicationPackage = new ApplicationPackageBuilder() .cloudAccount(cloudAccount) .region(prodZone.region()) .build(); context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'"); tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class); context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'") .abortJob(stagingTest); tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), systemTest.zone(), stagingTest.zone(), prodZone); 
context.submit(applicationPackage).deploy(); tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), devZone); context.runJob(devZone, applicationPackage); for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) { assertEquals(cloudAccount, tester.controllerTester().configServer() .cloudAccount(context.deploymentIdIn(zoneId)) .get().value()); } } @Test void testSubmitWithElementDeprecatedOnPreviousMajor() { DeploymentContext context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .compileVersion(Version.fromString("8.1")) .region("us-west-1") .globalServiceId("qrs") .build(); try { context.submit(applicationPackage).deploy(); fail("Expected exception"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7")); } } }
It's fine, but I updated the test to use a completely empty package.
void testDevDeployment() { ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(DeploymentSpec.empty.xmlForm()); var context = tester.newDeploymentContext(); ZoneId zone = ZoneId.from("dev", "us-east-1"); tester.controllerTester().zoneRegistry() .setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4); context.runJob(zone, applicationPackage); assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(), "Application deployed and activated"); assertTrue(context.instanceJobs().isEmpty(), "No job status added"); assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored"); Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone)) .asList() .stream() .map(Endpoint::routingMethod) .collect(Collectors.toSet()); assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4)); assertNotNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(new DeploymentId(context.instanceId(), zone)) .get(tester.clock().instant())); tester.clock().advance(Duration.ofSeconds(1)); tester.controller().applications().deactivate(context.instanceId(), zone); assertArrayEquals(new byte[0], tester.controllerTester().serviceRegistry().applicationStore() .getMeta(new DeploymentId(context.instanceId(), zone)) .get(tester.clock().instant())); }
ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(DeploymentSpec.empty.xmlForm());
void testDevDeployment() { ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]); var context = tester.newDeploymentContext(); ZoneId zone = ZoneId.from("dev", "us-east-1"); tester.controllerTester().zoneRegistry() .setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4); context.runJob(zone, applicationPackage); assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(), "Application deployed and activated"); assertTrue(context.instanceJobs().isEmpty(), "No job status added"); assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored"); Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone)) .asList() .stream() .map(Endpoint::routingMethod) .collect(Collectors.toSet()); assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4)); assertNotNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(new DeploymentId(context.instanceId(), zone)) .get(tester.clock().instant())); tester.clock().advance(Duration.ofSeconds(1)); tester.controller().applications().deactivate(context.instanceId(), zone); assertArrayEquals(new byte[0], tester.controllerTester().serviceRegistry().applicationStore() .getMeta(new DeploymentId(context.instanceId(), zone)) .get(tester.clock().instant())); }
class ControllerTest { private final DeploymentTester tester = new DeploymentTester(); @Test void testDeployment() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .explicitEnvironment(Environment.dev, Environment.perf) .region("us-west-1") .region("us-east-3") .build(); Version version1 = tester.configServer().initialVersion(); var context = tester.newDeploymentContext(); context.submit(applicationPackage); assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)), context.application().revisions().get(context.instance().change().revision().get()), "Application version is known from completion of initial job"); context.runJob(systemTest); context.runJob(stagingTest); RevisionId applicationVersion = context.instance().change().revision().get(); assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment"); tester.triggerJobs(); tester.clock().advance(Duration.ofSeconds(1)); context.timeOutUpgrade(productionUsWest1); assertEquals(4, context.instanceJobs().size()); tester.triggerJobs(); tester.controllerTester().createNewController(); assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1"))); assertNotNull(tester.controller().applications().requireInstance(context.instanceId())); context.submit(applicationPackage); context.runJob(systemTest); context.runJob(stagingTest); context.triggerJobs().jobAborted(productionUsWest1); context.runJob(productionUsWest1); tester.triggerJobs(); context.runJob(productionUsEast3); assertEquals(4, context.instanceJobs().size()); applicationPackage = new ApplicationPackageBuilder() .instances("hellO") .build(); try { context.submit(applicationPackage); fail("Expected exception due to illegal deployment spec."); } catch (IllegalArgumentException e) { assertEquals("Invalid id 'hellO'. 
Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage()); } applicationPackage = new ApplicationPackageBuilder() .region("deep-space-9") .build(); try { context.submit(applicationPackage); fail("Expected exception due to illegal deployment spec."); } catch (IllegalArgumentException e) { assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage()); } applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .build(); try { assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1"))); context.submit(applicationPackage); fail("Expected exception due to illegal production deployment removal"); } catch (IllegalArgumentException e) { assertEquals("deployment-removal: application 'tenant.application' is deployed in us-west-1, but does not include this zone in deployment.xml. 
" + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval), e.getMessage()); } assertNotNull(context.instance().deployments().get(productionUsWest1.zone()), "Zone was not removed"); applicationPackage = new ApplicationPackageBuilder() .allow(ValidationId.deploymentRemoval) .upgradePolicy("default") .region("us-east-3") .build(); context.submit(applicationPackage); assertNull(context.instance().deployments().get(productionUsWest1.zone()), "Zone was removed"); assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed"); assertNotNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.instanceId()) .get(tester.clock().instant())); tester.clock().advance(Duration.ofSeconds(1)); context.submit(ApplicationPackage.deploymentRemoval()); tester.clock().advance(Duration.ofSeconds(1)); context.submit(ApplicationPackage.deploymentRemoval()); tester.applications().deleteApplication(context.application().id(), tester.controllerTester().credentialsFor(context.instanceId().tenant())); assertArrayEquals(new byte[0], tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.instanceId()) .get(tester.clock().instant())); assertNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.deploymentIdIn(productionUsWest1.zone()))); } @Test void testGlobalRotationStatus() { var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() .region(zone1.region()) .region(zone2.region()) .endpoint("default", "default", zone1.region().value(), zone2.region().value()) .build(); context.submit(applicationPackage).deploy(); var deployment1 = context.deploymentIdIn(zone1); DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1); RoutingStatus status1 = routingContext.routingStatus(); assertEquals(RoutingStatus.Value.in, 
status1.value()); routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator); RoutingStatus status2 = routingContext.routingStatus(); assertEquals(RoutingStatus.Value.out, status2.value()); RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus(); assertEquals(RoutingStatus.Value.in, status3.value()); } @Test void testDnsUpdatesForGlobalEndpoint() { var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta"); var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default"); ZoneId usWest = ZoneId.from("prod.us-west-1"); ZoneId usCentral = ZoneId.from("prod.us-central-1"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("beta,default") .endpoint("default", "foo") .region(usWest.region()) .region(usCentral.region()) .build(); tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)), RoutingMethod.sharedLayer4); betaContext.submit(applicationPackage).deploy(); { Collection<Deployment> betaDeployments = betaContext.instance().deployments().values(); assertFalse(betaDeployments.isEmpty()); Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", "global", List.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-id-01"), OptionalInt.empty(), RoutingMethod.sharedLayer4)); for (Deployment deployment : betaDeployments) { assertEquals(containerEndpoints, tester.configServer().containerEndpoints() .get(betaContext.deploymentIdIn(deployment.zone()))); } betaContext.flushDnsUpdates(); } { Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values(); assertFalse(defaultDeployments.isEmpty()); Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", "global", List.of("app1.tenant1.global.vespa.oath.cloud", "rotation-id-02"), OptionalInt.empty(), RoutingMethod.sharedLayer4)); for (Deployment 
deployment : defaultDeployments) { assertEquals(containerEndpoints, tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone()))); } defaultContext.flushDnsUpdates(); } Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.", "app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02."); rotationCnames.forEach((cname, data) -> { var record = tester.controllerTester().findCname(cname); assertTrue(record.isPresent()); assertEquals(cname, record.get().name().asString()); assertEquals(data, record.get().data().asString()); }); Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"), defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud")); globalDnsNamesByInstance.forEach((instance, dnsNames) -> { Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance) .scope(Endpoint.Scope.global) .asList().stream() .map(Endpoint::dnsName) .collect(Collectors.toSet()); assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance); }); } @Test void testDnsUpdatesForGlobalEndpointLegacySyntax() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .globalServiceId("foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); Collection<Deployment> deployments = context.instance().deployments().values(); assertFalse(deployments.isEmpty()); for (Deployment deployment : deployments) { assertEquals(Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())), "Rotation names are passed to config server in " + deployment.zone()); } context.flushDnsUpdates(); assertEquals(1, 
tester.controllerTester().nameService().records().size()); Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud"); assertTrue(record.isPresent()); assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId()) .scope(Endpoint.Scope.global) .sortedBy(Comparator.comparing(Endpoint::dnsName)) .mapToList(Endpoint::dnsName); assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"), globalDnsNames); } @Test void testDnsUpdatesForMultipleGlobalEndpoints() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("foobar", "qrs", "us-west-1", "us-central-1") .endpoint("default", "qrs", "us-west-1", "us-central-1") .endpoint("all", "qrs") .endpoint("west", "qrs", "us-west-1") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); Collection<Deployment> deployments = context.instance().deployments().values(); assertFalse(deployments.isEmpty()); var notWest = Set.of( "rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud", "rotation-id-02", "app1.tenant1.global.vespa.oath.cloud", "rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud" ); var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud")); for (Deployment deployment : deployments) { assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? 
west : notWest, tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())), "Rotation names are passed to config server in " + deployment.zone()); } context.flushDnsUpdates(); assertEquals(4, tester.controllerTester().nameService().records().size()); var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud"); assertTrue(record1.isPresent()); assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString()); assertEquals("rotation-fqdn-02.", record1.get().data().asString()); var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record2.isPresent()); assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString()); assertEquals("rotation-fqdn-01.", record2.get().data().asString()); var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record3.isPresent()); assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString()); assertEquals("rotation-fqdn-03.", record3.get().data().asString()); var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record4.isPresent()); assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString()); assertEquals("rotation-fqdn-04.", record4.get().data().asString()); } @Test void testDnsUpdatesForGlobalEndpointChanges() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); var west = ZoneId.from("prod", "us-west-1"); var central = ZoneId.from("prod", "us-central-1"); var east = ZoneId.from("prod", "us-east-3"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage).deploy(); 
for (var zone : List.of(west, central)) { assertEquals( Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage2).deploy(); for (var zone : List.of(west, central)) { assertEquals( Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } assertEquals( Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(east)) , "Zone " + east + " is a member of global endpoint"); ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage3).deploy(); for (var zone : List.of(west, central, east)) { assertEquals( zone.equals(east) ? 
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud", "rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud") : Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage4); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " + "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " + "but does not include all of these in deployment.xml. Deploying given deployment.xml " + "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " + "and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " + ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage()); } ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder() .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage5); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " + "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " + "but does not include all of these in deployment.xml. 
Deploying given deployment.xml " + "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " + ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage()); } ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder() .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage6); } @Test void testUnassignRotations() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "qrs", "us-west-1", "us-central-1") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder() .region("us-west-1") .region("us-central-1") .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage2).deploy(); assertEquals(List.of(), context.instance().rotations()); assertEquals( Set.of(), tester.configServer().containerEndpoints().get(context.deploymentIdIn(ZoneId.from("prod", "us-west-1"))) ); } @Test void testDnsUpdatesWithChangeInRotationAssignment() { String dnsName1 = "app1.tenant1.global.vespa.oath.cloud"; { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals(1, tester.controllerTester().nameService().records().size()); { Optional<Record> record = tester.controllerTester().findCname(dnsName1); assertTrue(record.isPresent()); assertEquals(dnsName1, record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); } applicationPackage = new ApplicationPackageBuilder() 
.allow(ValidationId.deploymentRemoval) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage); tester.applications().deleteApplication(context.application().id(), tester.controllerTester().credentialsFor(context.application().id().tenant())); try (RotationLock lock = tester.controller().routing().rotations().lock()) { assertTrue(tester.controller().routing().rotations().availableRotations(lock) .containsKey(new RotationId("rotation-id-01")), "Rotation is unassigned"); } context.flushDnsUpdates(); Optional<Record> record = tester.controllerTester().findCname(dnsName1); assertTrue(record.isEmpty(), dnsName1 + " is removed"); } String dnsName2 = "app2.tenant2.global.vespa.oath.cloud"; { var context = tester.newDeploymentContext("tenant2", "app2", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals(1, tester.controllerTester().nameService().records().size()); var record = tester.controllerTester().findCname(dnsName2); assertTrue(record.isPresent()); assertEquals(dnsName2, record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); } { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString()); assertEquals(2, tester.controllerTester().nameService().records().size()); var record1 = tester.controllerTester().findCname(dnsName1); assertTrue(record1.isPresent()); assertEquals("rotation-fqdn-02.", record1.get().data().asString()); var record2 = tester.controllerTester().findCname(dnsName2); assertTrue(record2.isPresent()); 
assertEquals("rotation-fqdn-01.", record2.get().data().asString()); } } @Test void testDnsUpdatesForApplicationEndpoint() { ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta"); ApplicationId main = ApplicationId.from("tenant1", "app1", "main"); var context = tester.newDeploymentContext(beta); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("beta,main") .region("us-west-1") .region("us-east-3") .applicationEndpoint("a", "default", "us-west-1", Map.of(beta.instance(), 2, main.instance(), 8)) .applicationEndpoint("b", "default", "us-west-1", Map.of(beta.instance(), 1, main.instance(), 1)) .applicationEndpoint("c", "default", "us-east-3", Map.of(beta.instance(), 4, main.instance(), 6)) .build(); context.submit(applicationPackage).deploy(); ZoneId usWest = ZoneId.from("prod", "us-west-1"); ZoneId usEast = ZoneId.from("prod", "us-east-3"); Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of( new DeploymentId(beta, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 2, "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1), new DeploymentId(main, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 8, "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1), new DeploymentId(beta, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 4), new DeploymentId(main, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 6) ); deploymentEndpoints.forEach((deployment, endpoints) -> { Set<ContainerEndpoint> expected = endpoints.entrySet().stream() .map(kv -> new ContainerEndpoint("default", "application", List.of(kv.getKey()), OptionalInt.of(kv.getValue()), RoutingMethod.sharedLayer4)) .collect(Collectors.toSet()); assertEquals(expected, tester.configServer().containerEndpoints().get(deployment), "Endpoint names for " + deployment + " are passed to config server"); }); context.flushDnsUpdates(); Set<Record> records = tester.controllerTester().nameService().records(); 
assertEquals(Set.of(new Record(Record.Type.CNAME, RecordName.from("a.app1.tenant1.us-west-1-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-west-1.")), new Record(Record.Type.CNAME, RecordName.from("b.app1.tenant1.us-west-1-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-west-1.")), new Record(Record.Type.CNAME, RecordName.from("c.app1.tenant1.us-east-3-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-east-3."))), records); List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application()) .scope(Endpoint.Scope.application) .mapToList(Endpoint::dnsName); assertEquals(List.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", "c.app1.tenant1.us-east-3-r.vespa.oath.cloud"), endpointDnsNames); } @Test @Test void testDevDeploymentWithIncompatibleVersions() { Version version1 = new Version("7"); Version version2 = new Version("7.5"); Version version3 = new Version("8"); var context = tester.newDeploymentContext(); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class); tester.controllerTester().upgradeSystem(version2); ZoneId zone = ZoneId.from("dev", "us-east-1"); context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build()); assertEquals(version2, context.deployment(zone).version()); assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build()); fail("Should fail when specifying a major that does not yet exist"); } catch (IllegalArgumentException e) { assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage()); } try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build()); fail("Should fail when compiled against a version which is only 
compatible with not-yet-existent versions"); } catch (IllegalArgumentException e) { assertEquals("no platforms are compatible with compile version 8", e.getMessage()); } tester.controllerTester().upgradeSystem(version3); try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build()); fail("Should fail when specifying a major which is incompatible with compile version"); } catch (IllegalArgumentException e) { assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage()); } context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build()); assertEquals(version3, context.deployment(zone).version()); assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build()); assertEquals(version3, context.deployment(zone).version()); assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); } @Test void testSuspension() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); context.submit(applicationPackage).deploy(); DeploymentId deployment1 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1"))); DeploymentId deployment2 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3"))); assertFalse(tester.configServer().isSuspended(deployment1)); assertFalse(tester.configServer().isSuspended(deployment2)); tester.configServer().setSuspension(deployment1, true); assertTrue(tester.configServer().isSuspended(deployment1)); assertFalse(tester.configServer().isSuspended(deployment2)); } @Test void testDeletingApplicationThatHasAlreadyBeenDeleted() { 
var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); ZoneId zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1")); context.submit(applicationPackage).runJob(zone, applicationPackage); tester.controller().applications().deactivate(context.instanceId(), zone); tester.controller().applications().deactivate(context.instanceId(), zone); } @Test void testDeployApplicationWithWarnings() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); ZoneId zone = ZoneId.from("prod", "us-west-1"); int warnings = 3; tester.configServer().generateWarnings(context.deploymentIdIn(zone), warnings); context.submit(applicationPackage).deploy(); assertEquals(warnings, context.deployment(zone) .metrics().warnings().get(DeploymentMetrics.Warning.all).intValue()); } @Test void testDeploySelectivelyProvisionsCertificate() { Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id()); var context1 = tester.newDeploymentContext("tenant1", "app1", "default"); var prodZone = ZoneId.from("prod", "us-west-1"); var stagingZone = ZoneId.from("staging", "us-east-3"); var testZone = ZoneId.from("test", "us-east-1"); tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone)); var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")) .region(prodZone.region()) .build(); context1.submit(applicationPackage).deploy(); var cert = certificate.apply(context1.instance()); assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod); assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud", "app1.tenant1.global.vespa.oath.cloud", 
"*.app1.tenant1.global.vespa.oath.cloud"), Stream.of(prodZone, testZone, stagingZone) .flatMap(zone -> Stream.of("", "*.") .map(prefix -> prefix + "app1.tenant1." + zone.region().value() + (zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) + ".vespa.oath.cloud"))) .collect(Collectors.toUnmodifiableSet()), Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId()))); context1.submit(applicationPackage).deploy(); assertEquals(cert, certificate.apply(context1.instance())); var context2 = tester.newDeploymentContext("tenant1", "app2", "default"); var devZone = ZoneId.from("dev", "us-east-1"); context2.runJob(devZone, applicationPackage); assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(), "Application deployed and activated"); assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer"); } @Test void testDeployWithGlobalEndpointsInMultipleClouds() { tester.controllerTester().zoneRegistry().setZones( ZoneApiMock.fromId("test.us-west-1"), ZoneApiMock.fromId("staging.us-west-1"), ZoneApiMock.fromId("prod.us-west-1"), ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build() ); var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .region("aws-us-east-1") .region("us-west-1") .endpoint("default", "default") .build(); try { context.submit(applicationPackage); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage()); } var applicationPackage2 = new ApplicationPackageBuilder() .region("aws-us-east-1") .region("us-west-1") .endpoint("aws", "default", "aws-us-east-1") .endpoint("foo", "default", "aws-us-east-1", "us-west-1") .build(); try { 
context.submit(applicationPackage2); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage()); } } @Test void testDeployWithGlobalEndpointsInGcp() { tester.controllerTester().zoneRegistry().setZones( ZoneApiMock.fromId("test.us-west-1"), ZoneApiMock.fromId("staging.us-west-1"), ZoneApiMock.newBuilder().with(CloudName.GCP).withId("prod.gcp-us-east1-b").build() ); var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .region("gcp-us-east1-b") .endpoint("default", "default") .build(); try { context.submit(applicationPackage); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'default' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage()); } var applicationPackage2 = new ApplicationPackageBuilder() .region("gcp-us-east1-b") .endpoint("gcp", "default", "gcp-us-east1-b") .build(); try { context.submit(applicationPackage2); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'gcp' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage()); } } @Test void testDeployWithoutSourceRevision() { var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .region("us-west-1") .build(); context.submit(applicationPackage, Optional.empty()) .deploy(); assertEquals(1, context.instance().deployments().size(), "Deployed application"); } @Test void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() { var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() 
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")) .endpoint("default", "default", zone1.region().value(), zone2.region().value()) .endpoint("east", "default", zone2.region().value()) .region(zone1.region()) .region(zone2.region()) .build(); tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4); tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive); context.submit(applicationPackage).deploy(); var expectedRecords = List.of( new Record(Record.Type.ALIAS, RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"), new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"), "dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()), new Record(Record.Type.ALIAS, RecordName.from("east.application.tenant.global.vespa.oath.cloud"), new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"), "dns-zone-1", ZoneId.from("prod.us-east-3")).pack()), new Record(Record.Type.CNAME, RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"), RecordData.from("lb-0--tenant.application.default--prod.us-east-3."))); assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records())); } @Test void testDeploymentDirectRouting() { DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main)); var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var zone3 = ZoneId.from("prod", "eu-west-1"); tester.controllerTester().zoneRegistry() .exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3)); var applicationPackageBuilder = new ApplicationPackageBuilder() .region(zone1.region()) .region(zone2.region()) .region(zone3.region()) .endpoint("default", "default") .endpoint("foo", "qrs") .endpoint("us", 
"default", zone1.region().value(), zone2.region().value()) .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")); context.submit(applicationPackageBuilder.build()).deploy(); for (var zone : List.of(zone1, zone2)) { assertEquals(Set.of("application.tenant.global.vespa.oath.cloud", "foo.application.tenant.global.vespa.oath.cloud", "us.application.tenant.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)), "Expected container endpoints in " + zone); } assertEquals(Set.of("application.tenant.global.vespa.oath.cloud", "foo.application.tenant.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)), "Expected container endpoints in " + zone3); } @Test void testChangeEndpointCluster() { var context = tester.newDeploymentContext(); var west = ZoneId.from("prod", "us-west-1"); var east = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region(west.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage).deploy(); assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId()) .rotations().get(0).clusterId()); applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "bar") .region(west.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage).deploy(); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " + "'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " + "deployment.xml. Deploying given deployment.xml will remove " + "[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " + "[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. 
To allow this add " + "<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " + "https: } applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "bar") .region(west.region().value()) .region(east.region().value()) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage).deploy(); assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId()) .rotations().get(0).clusterId()); } @Test void testReadableApplications() { var db = new MockCuratorDb(tester.controller().system()); var tester = new DeploymentTester(new ControllerTester(db)); var app1 = tester.newDeploymentContext("t1", "a1", "default") .submit() .deploy(); var app2 = tester.newDeploymentContext("t2", "a2", "default") .submit() .deploy(); assertEquals(2, tester.applications().readable().size()); db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()), new byte[]{(byte) 0xDE, (byte) 0xAD}); assertEquals(1, tester.applications().readable().size()); try { tester.applications().asList(); fail("Expected exception"); } catch (Exception ignored) { } app1.submit().deploy(); } @Test void testClashingEndpointIdAndInstanceName() { String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <instance id=\"default\">\n" + " <prod>\n" + " <region active=\"true\">us-west-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id=\"dev\" container-id=\"qrs\"/>\n" + " </endpoints>\n" + " </instance>\n" + " <instance id=\"dev\">\n" + " <prod>\n" + " <region active=\"true\">us-west-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id=\"default\" container-id=\"qrs\"/>\n" + " </endpoints>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml); try { tester.newDeploymentContext().submit(applicationPackage); 
fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'", e.getMessage()); } } @Test void testTestPackageWarnings() { String deploymentXml = "<deployment version='1.0'>\n" + " <prod>\n" + " <region>us-west-1</region>\n" + " </prod>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml); byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0])); var app = tester.newDeploymentContext(); tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1); assertEquals(List.of(new Notification(tester.clock().instant(), Type.testPackage, Level.warning, NotificationSource.from(app.application().id()), List.of("test package has staging tests, so it should also include staging setup", "see https: tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true)); } @Test void testCompileVersion() { DeploymentContext context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build(); TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId()); Version version0 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version0); tester.upgrader().overrideConfidence(version0, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); 
context.submit(applicationPackage).deploy(); Version version1 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().overrideConfidence(version1, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty())); tester.upgrader().maintain(); context.deployPlatform(version1); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy(); TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app"); Version version2 = Version.fromString("8.0"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8))); assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, 
OptionalInt.of(8))); assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8))); tester.upgrader().overrideConfidence(version2, Confidence.broken); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals("no suitable, released compile version exists", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, 
OptionalInt.empty())) .getMessage()); assertEquals("no suitable, released compile version exists for specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8))); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8))); } @Test void testCloudAccount() { DeploymentContext context = tester.newDeploymentContext(); ZoneId devZone = devUsEast1.zone(); ZoneId prodZone = productionUsWest1.zone(); String cloudAccount = "012345678912"; var applicationPackage = new ApplicationPackageBuilder() .cloudAccount(cloudAccount) .region(prodZone.region()) .build(); context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'"); tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class); context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'") .abortJob(stagingTest); tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), systemTest.zone(), stagingTest.zone(), prodZone); 
context.submit(applicationPackage).deploy(); tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), devZone); context.runJob(devZone, applicationPackage); for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) { assertEquals(cloudAccount, tester.controllerTester().configServer() .cloudAccount(context.deploymentIdIn(zoneId)) .get().value()); } } @Test void testSubmitWithElementDeprecatedOnPreviousMajor() { DeploymentContext context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .compileVersion(Version.fromString("8.1")) .region("us-west-1") .globalServiceId("qrs") .build(); try { context.submit(applicationPackage).deploy(); fail("Expected exception"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7")); } } }
class ControllerTest { private final DeploymentTester tester = new DeploymentTester(); @Test void testDeployment() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .explicitEnvironment(Environment.dev, Environment.perf) .region("us-west-1") .region("us-east-3") .build(); Version version1 = tester.configServer().initialVersion(); var context = tester.newDeploymentContext(); context.submit(applicationPackage); assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)), context.application().revisions().get(context.instance().change().revision().get()), "Application version is known from completion of initial job"); context.runJob(systemTest); context.runJob(stagingTest); RevisionId applicationVersion = context.instance().change().revision().get(); assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment"); tester.triggerJobs(); tester.clock().advance(Duration.ofSeconds(1)); context.timeOutUpgrade(productionUsWest1); assertEquals(4, context.instanceJobs().size()); tester.triggerJobs(); tester.controllerTester().createNewController(); assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1"))); assertNotNull(tester.controller().applications().requireInstance(context.instanceId())); context.submit(applicationPackage); context.runJob(systemTest); context.runJob(stagingTest); context.triggerJobs().jobAborted(productionUsWest1); context.runJob(productionUsWest1); tester.triggerJobs(); context.runJob(productionUsEast3); assertEquals(4, context.instanceJobs().size()); applicationPackage = new ApplicationPackageBuilder() .instances("hellO") .build(); try { context.submit(applicationPackage); fail("Expected exception due to illegal deployment spec."); } catch (IllegalArgumentException e) { assertEquals("Invalid id 'hellO'. 
Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage()); } applicationPackage = new ApplicationPackageBuilder() .region("deep-space-9") .build(); try { context.submit(applicationPackage); fail("Expected exception due to illegal deployment spec."); } catch (IllegalArgumentException e) { assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage()); } applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .build(); try { assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1"))); context.submit(applicationPackage); fail("Expected exception due to illegal production deployment removal"); } catch (IllegalArgumentException e) { assertEquals("deployment-removal: application 'tenant.application' is deployed in us-west-1, but does not include this zone in deployment.xml. 
" + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval), e.getMessage()); } assertNotNull(context.instance().deployments().get(productionUsWest1.zone()), "Zone was not removed"); applicationPackage = new ApplicationPackageBuilder() .allow(ValidationId.deploymentRemoval) .upgradePolicy("default") .region("us-east-3") .build(); context.submit(applicationPackage); assertNull(context.instance().deployments().get(productionUsWest1.zone()), "Zone was removed"); assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed"); assertNotNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.instanceId()) .get(tester.clock().instant())); tester.clock().advance(Duration.ofSeconds(1)); context.submit(ApplicationPackage.deploymentRemoval()); tester.clock().advance(Duration.ofSeconds(1)); context.submit(ApplicationPackage.deploymentRemoval()); tester.applications().deleteApplication(context.application().id(), tester.controllerTester().credentialsFor(context.instanceId().tenant())); assertArrayEquals(new byte[0], tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.instanceId()) .get(tester.clock().instant())); assertNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.deploymentIdIn(productionUsWest1.zone()))); } @Test void testGlobalRotationStatus() { var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() .region(zone1.region()) .region(zone2.region()) .endpoint("default", "default", zone1.region().value(), zone2.region().value()) .build(); context.submit(applicationPackage).deploy(); var deployment1 = context.deploymentIdIn(zone1); DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1); RoutingStatus status1 = routingContext.routingStatus(); assertEquals(RoutingStatus.Value.in, 
status1.value()); routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator); RoutingStatus status2 = routingContext.routingStatus(); assertEquals(RoutingStatus.Value.out, status2.value()); RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus(); assertEquals(RoutingStatus.Value.in, status3.value()); } @Test void testDnsUpdatesForGlobalEndpoint() { var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta"); var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default"); ZoneId usWest = ZoneId.from("prod.us-west-1"); ZoneId usCentral = ZoneId.from("prod.us-central-1"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("beta,default") .endpoint("default", "foo") .region(usWest.region()) .region(usCentral.region()) .build(); tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)), RoutingMethod.sharedLayer4); betaContext.submit(applicationPackage).deploy(); { Collection<Deployment> betaDeployments = betaContext.instance().deployments().values(); assertFalse(betaDeployments.isEmpty()); Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", "global", List.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-id-01"), OptionalInt.empty(), RoutingMethod.sharedLayer4)); for (Deployment deployment : betaDeployments) { assertEquals(containerEndpoints, tester.configServer().containerEndpoints() .get(betaContext.deploymentIdIn(deployment.zone()))); } betaContext.flushDnsUpdates(); } { Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values(); assertFalse(defaultDeployments.isEmpty()); Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", "global", List.of("app1.tenant1.global.vespa.oath.cloud", "rotation-id-02"), OptionalInt.empty(), RoutingMethod.sharedLayer4)); for (Deployment 
deployment : defaultDeployments) { assertEquals(containerEndpoints, tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone()))); } defaultContext.flushDnsUpdates(); } Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.", "app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02."); rotationCnames.forEach((cname, data) -> { var record = tester.controllerTester().findCname(cname); assertTrue(record.isPresent()); assertEquals(cname, record.get().name().asString()); assertEquals(data, record.get().data().asString()); }); Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"), defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud")); globalDnsNamesByInstance.forEach((instance, dnsNames) -> { Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance) .scope(Endpoint.Scope.global) .asList().stream() .map(Endpoint::dnsName) .collect(Collectors.toSet()); assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance); }); } @Test void testDnsUpdatesForGlobalEndpointLegacySyntax() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .globalServiceId("foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); Collection<Deployment> deployments = context.instance().deployments().values(); assertFalse(deployments.isEmpty()); for (Deployment deployment : deployments) { assertEquals(Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())), "Rotation names are passed to config server in " + deployment.zone()); } context.flushDnsUpdates(); assertEquals(1, 
tester.controllerTester().nameService().records().size()); Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud"); assertTrue(record.isPresent()); assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId()) .scope(Endpoint.Scope.global) .sortedBy(Comparator.comparing(Endpoint::dnsName)) .mapToList(Endpoint::dnsName); assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"), globalDnsNames); } @Test void testDnsUpdatesForMultipleGlobalEndpoints() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("foobar", "qrs", "us-west-1", "us-central-1") .endpoint("default", "qrs", "us-west-1", "us-central-1") .endpoint("all", "qrs") .endpoint("west", "qrs", "us-west-1") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); Collection<Deployment> deployments = context.instance().deployments().values(); assertFalse(deployments.isEmpty()); var notWest = Set.of( "rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud", "rotation-id-02", "app1.tenant1.global.vespa.oath.cloud", "rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud" ); var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud")); for (Deployment deployment : deployments) { assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? 
west : notWest, tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())), "Rotation names are passed to config server in " + deployment.zone()); } context.flushDnsUpdates(); assertEquals(4, tester.controllerTester().nameService().records().size()); var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud"); assertTrue(record1.isPresent()); assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString()); assertEquals("rotation-fqdn-02.", record1.get().data().asString()); var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record2.isPresent()); assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString()); assertEquals("rotation-fqdn-01.", record2.get().data().asString()); var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record3.isPresent()); assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString()); assertEquals("rotation-fqdn-03.", record3.get().data().asString()); var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record4.isPresent()); assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString()); assertEquals("rotation-fqdn-04.", record4.get().data().asString()); } @Test void testDnsUpdatesForGlobalEndpointChanges() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); var west = ZoneId.from("prod", "us-west-1"); var central = ZoneId.from("prod", "us-central-1"); var east = ZoneId.from("prod", "us-east-3"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage).deploy(); 
for (var zone : List.of(west, central)) { assertEquals( Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage2).deploy(); for (var zone : List.of(west, central)) { assertEquals( Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } assertEquals( Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(east)) , "Zone " + east + " is a member of global endpoint"); ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage3).deploy(); for (var zone : List.of(west, central, east)) { assertEquals( zone.equals(east) ? 
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud", "rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud") : Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage4); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " + "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " + "but does not include all of these in deployment.xml. Deploying given deployment.xml " + "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " + "and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " + ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage()); } ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder() .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage5); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " + "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " + "but does not include all of these in deployment.xml. 
Deploying given deployment.xml " + "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " + ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage()); } ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder() .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage6); } @Test void testUnassignRotations() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "qrs", "us-west-1", "us-central-1") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder() .region("us-west-1") .region("us-central-1") .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage2).deploy(); assertEquals(List.of(), context.instance().rotations()); assertEquals( Set.of(), tester.configServer().containerEndpoints().get(context.deploymentIdIn(ZoneId.from("prod", "us-west-1"))) ); } @Test void testDnsUpdatesWithChangeInRotationAssignment() { String dnsName1 = "app1.tenant1.global.vespa.oath.cloud"; { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals(1, tester.controllerTester().nameService().records().size()); { Optional<Record> record = tester.controllerTester().findCname(dnsName1); assertTrue(record.isPresent()); assertEquals(dnsName1, record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); } applicationPackage = new ApplicationPackageBuilder() 
.allow(ValidationId.deploymentRemoval) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage); tester.applications().deleteApplication(context.application().id(), tester.controllerTester().credentialsFor(context.application().id().tenant())); try (RotationLock lock = tester.controller().routing().rotations().lock()) { assertTrue(tester.controller().routing().rotations().availableRotations(lock) .containsKey(new RotationId("rotation-id-01")), "Rotation is unassigned"); } context.flushDnsUpdates(); Optional<Record> record = tester.controllerTester().findCname(dnsName1); assertTrue(record.isEmpty(), dnsName1 + " is removed"); } String dnsName2 = "app2.tenant2.global.vespa.oath.cloud"; { var context = tester.newDeploymentContext("tenant2", "app2", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals(1, tester.controllerTester().nameService().records().size()); var record = tester.controllerTester().findCname(dnsName2); assertTrue(record.isPresent()); assertEquals(dnsName2, record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); } { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString()); assertEquals(2, tester.controllerTester().nameService().records().size()); var record1 = tester.controllerTester().findCname(dnsName1); assertTrue(record1.isPresent()); assertEquals("rotation-fqdn-02.", record1.get().data().asString()); var record2 = tester.controllerTester().findCname(dnsName2); assertTrue(record2.isPresent()); 
assertEquals("rotation-fqdn-01.", record2.get().data().asString()); } } @Test void testDnsUpdatesForApplicationEndpoint() { ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta"); ApplicationId main = ApplicationId.from("tenant1", "app1", "main"); var context = tester.newDeploymentContext(beta); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("beta,main") .region("us-west-1") .region("us-east-3") .applicationEndpoint("a", "default", "us-west-1", Map.of(beta.instance(), 2, main.instance(), 8)) .applicationEndpoint("b", "default", "us-west-1", Map.of(beta.instance(), 1, main.instance(), 1)) .applicationEndpoint("c", "default", "us-east-3", Map.of(beta.instance(), 4, main.instance(), 6)) .build(); context.submit(applicationPackage).deploy(); ZoneId usWest = ZoneId.from("prod", "us-west-1"); ZoneId usEast = ZoneId.from("prod", "us-east-3"); Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of( new DeploymentId(beta, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 2, "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1), new DeploymentId(main, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 8, "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1), new DeploymentId(beta, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 4), new DeploymentId(main, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 6) ); deploymentEndpoints.forEach((deployment, endpoints) -> { Set<ContainerEndpoint> expected = endpoints.entrySet().stream() .map(kv -> new ContainerEndpoint("default", "application", List.of(kv.getKey()), OptionalInt.of(kv.getValue()), RoutingMethod.sharedLayer4)) .collect(Collectors.toSet()); assertEquals(expected, tester.configServer().containerEndpoints().get(deployment), "Endpoint names for " + deployment + " are passed to config server"); }); context.flushDnsUpdates(); Set<Record> records = tester.controllerTester().nameService().records(); 
assertEquals(Set.of(new Record(Record.Type.CNAME, RecordName.from("a.app1.tenant1.us-west-1-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-west-1.")), new Record(Record.Type.CNAME, RecordName.from("b.app1.tenant1.us-west-1-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-west-1.")), new Record(Record.Type.CNAME, RecordName.from("c.app1.tenant1.us-east-3-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-east-3."))), records); List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application()) .scope(Endpoint.Scope.application) .mapToList(Endpoint::dnsName); assertEquals(List.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", "c.app1.tenant1.us-east-3-r.vespa.oath.cloud"), endpointDnsNames); } @Test @Test void testDevDeploymentWithIncompatibleVersions() { Version version1 = new Version("7"); Version version2 = new Version("7.5"); Version version3 = new Version("8"); var context = tester.newDeploymentContext(); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class); tester.controllerTester().upgradeSystem(version2); ZoneId zone = ZoneId.from("dev", "us-east-1"); context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build()); assertEquals(version2, context.deployment(zone).version()); assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build()); fail("Should fail when specifying a major that does not yet exist"); } catch (IllegalArgumentException e) { assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage()); } try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build()); fail("Should fail when compiled against a version which is only 
compatible with not-yet-existent versions");
} catch (IllegalArgumentException e) {
    assertEquals("no platforms are compatible with compile version 8", e.getMessage());
}
// Once an 8.x platform exists, deployments compiled on 8 (with or without an explicit
// majorVersion) land on it; an old compile version pinned to major 8 still fails.
tester.controllerTester().upgradeSystem(version3);
try {
    context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
    fail("Should fail when specifying a major which is incompatible with compile version");
} catch (IllegalArgumentException e) {
    assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage());
}
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
assertEquals(version3, context.deployment(zone).version());
assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
}

// Verifies that config-server suspension state is tracked per deployment:
// suspending one production zone must not affect the other.
@Test
void testSuspension() {
    var context = tester.newDeploymentContext();
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .region("us-west-1")
            .region("us-east-3")
            .build();
    context.submit(applicationPackage).deploy();
    DeploymentId deployment1 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
    DeploymentId deployment2 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
    // Both deployments start out active.
    assertFalse(tester.configServer().isSuspended(deployment1));
    assertFalse(tester.configServer().isSuspended(deployment2));
    tester.configServer().setSuspension(deployment1, true);
    assertTrue(tester.configServer().isSuspended(deployment1));
    assertFalse(tester.configServer().isSuspended(deployment2));
}

// Deactivating an already-deactivated deployment must be idempotent (no exception);
// method body continues on the following line.
@Test
void testDeletingApplicationThatHasAlreadyBeenDeleted() { 
var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); ZoneId zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1")); context.submit(applicationPackage).runJob(zone, applicationPackage); tester.controller().applications().deactivate(context.instanceId(), zone); tester.controller().applications().deactivate(context.instanceId(), zone); } @Test void testDeployApplicationWithWarnings() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); ZoneId zone = ZoneId.from("prod", "us-west-1"); int warnings = 3; tester.configServer().generateWarnings(context.deploymentIdIn(zone), warnings); context.submit(applicationPackage).deploy(); assertEquals(warnings, context.deployment(zone) .metrics().warnings().get(DeploymentMetrics.Warning.all).intValue()); } @Test void testDeploySelectivelyProvisionsCertificate() { Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id()); var context1 = tester.newDeploymentContext("tenant1", "app1", "default"); var prodZone = ZoneId.from("prod", "us-west-1"); var stagingZone = ZoneId.from("staging", "us-east-3"); var testZone = ZoneId.from("test", "us-east-1"); tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone)); var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")) .region(prodZone.region()) .build(); context1.submit(applicationPackage).deploy(); var cert = certificate.apply(context1.instance()); assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod); assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud", "app1.tenant1.global.vespa.oath.cloud", 
"*.app1.tenant1.global.vespa.oath.cloud"), Stream.of(prodZone, testZone, stagingZone) .flatMap(zone -> Stream.of("", "*.") .map(prefix -> prefix + "app1.tenant1." + zone.region().value() + (zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) + ".vespa.oath.cloud"))) .collect(Collectors.toUnmodifiableSet()), Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId()))); context1.submit(applicationPackage).deploy(); assertEquals(cert, certificate.apply(context1.instance())); var context2 = tester.newDeploymentContext("tenant1", "app2", "default"); var devZone = ZoneId.from("dev", "us-east-1"); context2.runJob(devZone, applicationPackage); assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(), "Application deployed and activated"); assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer"); } @Test void testDeployWithGlobalEndpointsInMultipleClouds() { tester.controllerTester().zoneRegistry().setZones( ZoneApiMock.fromId("test.us-west-1"), ZoneApiMock.fromId("staging.us-west-1"), ZoneApiMock.fromId("prod.us-west-1"), ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build() ); var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .region("aws-us-east-1") .region("us-west-1") .endpoint("default", "default") .build(); try { context.submit(applicationPackage); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage()); } var applicationPackage2 = new ApplicationPackageBuilder() .region("aws-us-east-1") .region("us-west-1") .endpoint("aws", "default", "aws-us-east-1") .endpoint("foo", "default", "aws-us-east-1", "us-west-1") .build(); try { 
context.submit(applicationPackage2); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage()); } } @Test void testDeployWithGlobalEndpointsInGcp() { tester.controllerTester().zoneRegistry().setZones( ZoneApiMock.fromId("test.us-west-1"), ZoneApiMock.fromId("staging.us-west-1"), ZoneApiMock.newBuilder().with(CloudName.GCP).withId("prod.gcp-us-east1-b").build() ); var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .region("gcp-us-east1-b") .endpoint("default", "default") .build(); try { context.submit(applicationPackage); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'default' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage()); } var applicationPackage2 = new ApplicationPackageBuilder() .region("gcp-us-east1-b") .endpoint("gcp", "default", "gcp-us-east1-b") .build(); try { context.submit(applicationPackage2); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'gcp' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage()); } } @Test void testDeployWithoutSourceRevision() { var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .region("us-west-1") .build(); context.submit(applicationPackage, Optional.empty()) .deploy(); assertEquals(1, context.instance().deployments().size(), "Deployed application"); } @Test void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() { var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() 
.athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")) .endpoint("default", "default", zone1.region().value(), zone2.region().value()) .endpoint("east", "default", zone2.region().value()) .region(zone1.region()) .region(zone2.region()) .build(); tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4); tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive); context.submit(applicationPackage).deploy(); var expectedRecords = List.of( new Record(Record.Type.ALIAS, RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"), new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"), "dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()), new Record(Record.Type.ALIAS, RecordName.from("east.application.tenant.global.vespa.oath.cloud"), new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"), "dns-zone-1", ZoneId.from("prod.us-east-3")).pack()), new Record(Record.Type.CNAME, RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"), RecordData.from("lb-0--tenant.application.default--prod.us-east-3."))); assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records())); } @Test void testDeploymentDirectRouting() { DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main)); var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var zone3 = ZoneId.from("prod", "eu-west-1"); tester.controllerTester().zoneRegistry() .exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3)); var applicationPackageBuilder = new ApplicationPackageBuilder() .region(zone1.region()) .region(zone2.region()) .region(zone3.region()) .endpoint("default", "default") .endpoint("foo", "qrs") .endpoint("us", 
"default", zone1.region().value(), zone2.region().value()) .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")); context.submit(applicationPackageBuilder.build()).deploy(); for (var zone : List.of(zone1, zone2)) { assertEquals(Set.of("application.tenant.global.vespa.oath.cloud", "foo.application.tenant.global.vespa.oath.cloud", "us.application.tenant.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)), "Expected container endpoints in " + zone); } assertEquals(Set.of("application.tenant.global.vespa.oath.cloud", "foo.application.tenant.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)), "Expected container endpoints in " + zone3); } @Test void testChangeEndpointCluster() { var context = tester.newDeploymentContext(); var west = ZoneId.from("prod", "us-west-1"); var east = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region(west.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage).deploy(); assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId()) .rotations().get(0).clusterId()); applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "bar") .region(west.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage).deploy(); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " + "'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " + "deployment.xml. Deploying given deployment.xml will remove " + "[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " + "[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. 
To allow this add " + "<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " + "https: } applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "bar") .region(west.region().value()) .region(east.region().value()) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage).deploy(); assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId()) .rotations().get(0).clusterId()); } @Test void testReadableApplications() { var db = new MockCuratorDb(tester.controller().system()); var tester = new DeploymentTester(new ControllerTester(db)); var app1 = tester.newDeploymentContext("t1", "a1", "default") .submit() .deploy(); var app2 = tester.newDeploymentContext("t2", "a2", "default") .submit() .deploy(); assertEquals(2, tester.applications().readable().size()); db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()), new byte[]{(byte) 0xDE, (byte) 0xAD}); assertEquals(1, tester.applications().readable().size()); try { tester.applications().asList(); fail("Expected exception"); } catch (Exception ignored) { } app1.submit().deploy(); } @Test void testClashingEndpointIdAndInstanceName() { String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <instance id=\"default\">\n" + " <prod>\n" + " <region active=\"true\">us-west-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id=\"dev\" container-id=\"qrs\"/>\n" + " </endpoints>\n" + " </instance>\n" + " <instance id=\"dev\">\n" + " <prod>\n" + " <region active=\"true\">us-west-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id=\"default\" container-id=\"qrs\"/>\n" + " </endpoints>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml); try { tester.newDeploymentContext().submit(applicationPackage); 
fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'", e.getMessage()); } } @Test void testTestPackageWarnings() { String deploymentXml = "<deployment version='1.0'>\n" + " <prod>\n" + " <region>us-west-1</region>\n" + " </prod>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml); byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0])); var app = tester.newDeploymentContext(); tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1); assertEquals(List.of(new Notification(tester.clock().instant(), Type.testPackage, Level.warning, NotificationSource.from(app.application().id()), List.of("test package has staging tests, so it should also include staging setup", "see https: tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true)); } @Test void testCompileVersion() { DeploymentContext context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build(); TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId()); Version version0 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version0); tester.upgrader().overrideConfidence(version0, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); 
context.submit(applicationPackage).deploy(); Version version1 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().overrideConfidence(version1, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty())); tester.upgrader().maintain(); context.deployPlatform(version1); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy(); TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app"); Version version2 = Version.fromString("8.0"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8))); assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, 
OptionalInt.of(8))); assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8))); tester.upgrader().overrideConfidence(version2, Confidence.broken); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals("no suitable, released compile version exists", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, 
OptionalInt.empty())) .getMessage()); assertEquals("no suitable, released compile version exists for specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8))); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8))); } @Test void testCloudAccount() { DeploymentContext context = tester.newDeploymentContext(); ZoneId devZone = devUsEast1.zone(); ZoneId prodZone = productionUsWest1.zone(); String cloudAccount = "012345678912"; var applicationPackage = new ApplicationPackageBuilder() .cloudAccount(cloudAccount) .region(prodZone.region()) .build(); context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'"); tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class); context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'") .abortJob(stagingTest); tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), systemTest.zone(), stagingTest.zone(), prodZone); 
context.submit(applicationPackage).deploy(); tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), devZone); context.runJob(devZone, applicationPackage); for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) { assertEquals(cloudAccount, tester.controllerTester().configServer() .cloudAccount(context.deploymentIdIn(zoneId)) .get().value()); } } @Test void testSubmitWithElementDeprecatedOnPreviousMajor() { DeploymentContext context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .compileVersion(Version.fromString("8.1")) .region("us-west-1") .globalServiceId("qrs") .build(); try { context.submit(applicationPackage).deploy(); fail("Expected exception"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7")); } } }
I think `default` can be removed since the switch is exhaustive.
// Map each VespaVersion.Confidence constant to the SystemMonitor.Confidence of the same name.
// The switch covers every constant of the enum, so it is exhaustive and needs no default
// branch: the compiler will reject this switch expression if a new constant is added
// without a corresponding mapping, which is exactly the failure mode we want.
return switch (confidence) {
    case aborted -> aborted;
    case broken -> broken;
    case low -> low;
    case legacy -> legacy;
    case normal -> normal;
    case high -> high;
};
default -> throw new IllegalArgumentException("Unexpected confidence '" + confidence + "'");
return switch (confidence) { case aborted -> aborted; case broken -> broken; case low -> low; case legacy -> legacy; case normal -> normal; case high -> high; }
/**
 * Maintainer that periodically recomputes the {@code VersionStatus} of the system,
 * stores it on the controller, and reports the current system version and its
 * confidence to the external system monitor.
 */
class VersionStatusUpdater extends ControllerMaintainer {

    public VersionStatusUpdater(Controller controller, Duration interval) {
        super(controller, interval);
    }

    /**
     * Recomputes and publishes the version status.
     *
     * @return 1.0 on success, 0.0 if the computation failed (the failure is logged
     *         and the maintainer simply retries on its next scheduled run)
     */
    @Override
    protected double maintain() {
        try {
            VersionStatus newStatus = VersionStatus.compute(controller());
            controller().updateVersionStatus(newStatus);
            // Only report to the monitor when a system version is known.
            newStatus.systemVersion().ifPresent(version -> {
                controller().serviceRegistry().systemMonitor().reportSystemVersion(version.versionNumber(),
                                                                                   convert(version.confidence()));
            });
            return 1.0;
        } catch (Exception e) {
            // Best-effort maintainer: log and let the next scheduled run retry.
            log.log(Level.WARNING, "Failed to compute version status: " + Exceptions.toMessageString(e) +
                                   ". Retrying in " + interval());
        }
        return 0.0;
    }

    /**
     * Maps a {@code VespaVersion.Confidence} to the {@code SystemMonitor.Confidence}
     * constant of the same name. The switch is exhaustive over the enum, so no
     * default branch is needed — adding a new confidence level without extending
     * this mapping becomes a compile error.
     * NOTE(review): the unqualified constants on the arrow's right-hand side assume
     * the file statically imports SystemMonitor.Confidence.* — confirm against the
     * file's import block.
     */
    static SystemMonitor.Confidence convert(VespaVersion.Confidence confidence) {
        return switch (confidence) {
            case aborted -> aborted;
            case broken -> broken;
            case low -> low;
            case legacy -> legacy;
            case normal -> normal;
            case high -> high;
        };
    }

}
class VersionStatusUpdater extends ControllerMaintainer { public VersionStatusUpdater(Controller controller, Duration interval) { super(controller, interval); } @Override protected double maintain() { try { VersionStatus newStatus = VersionStatus.compute(controller()); controller().updateVersionStatus(newStatus); newStatus.systemVersion().ifPresent(version -> { controller().serviceRegistry().systemMonitor().reportSystemVersion(version.versionNumber(), convert(version.confidence())); }); return 1.0; } catch (Exception e) { log.log(Level.WARNING, "Failed to compute version status: " + Exceptions.toMessageString(e) + ". Retrying in " + interval()); } return 0.0; } static SystemMonitor.Confidence convert(VespaVersion.Confidence confidence) { ; } }
Fixed, thanks.
return switch (confidence) { case aborted -> aborted; case broken -> broken; case low -> low; case legacy -> legacy; case normal -> normal; case high -> high; default -> throw new IllegalArgumentException("Unexpected confidence '" + confidence + "'"); };
default -> throw new IllegalArgumentException("Unexpected confidence '" + confidence + "'");
return switch (confidence) { case aborted -> aborted; case broken -> broken; case low -> low; case legacy -> legacy; case normal -> normal; case high -> high; }
class VersionStatusUpdater extends ControllerMaintainer { public VersionStatusUpdater(Controller controller, Duration interval) { super(controller, interval); } @Override protected double maintain() { try { VersionStatus newStatus = VersionStatus.compute(controller()); controller().updateVersionStatus(newStatus); newStatus.systemVersion().ifPresent(version -> { controller().serviceRegistry().systemMonitor().reportSystemVersion(version.versionNumber(), convert(version.confidence())); }); return 1.0; } catch (Exception e) { log.log(Level.WARNING, "Failed to compute version status: " + Exceptions.toMessageString(e) + ". Retrying in " + interval()); } return 0.0; } static SystemMonitor.Confidence convert(VespaVersion.Confidence confidence) { ; } }
class VersionStatusUpdater extends ControllerMaintainer { public VersionStatusUpdater(Controller controller, Duration interval) { super(controller, interval); } @Override protected double maintain() { try { VersionStatus newStatus = VersionStatus.compute(controller()); controller().updateVersionStatus(newStatus); newStatus.systemVersion().ifPresent(version -> { controller().serviceRegistry().systemMonitor().reportSystemVersion(version.versionNumber(), convert(version.confidence())); }); return 1.0; } catch (Exception e) { log.log(Level.WARNING, "Failed to compute version status: " + Exceptions.toMessageString(e) + ". Retrying in " + interval()); } return 0.0; } static SystemMonitor.Confidence convert(VespaVersion.Confidence confidence) { ; } }
Since this method is called `setFailedNodes` should it be `=` and not `+=`? Otherwise it might be preferable to call it `addFailedNodes` instead to be more clear about the semantics
/**
 * Sets the number of nodes that failed. Assigns (rather than accumulates) the value,
 * matching the "set" semantics of the method name; the previous implementation used
 * {@code +=}, which silently accumulated across calls (flagged in review).
 */
public void setFailedNodes(int failedNodes) {
    this.failedNodes = failedNodes;
}
this.failedNodes += failedNodes;
public void setFailedNodes(int failedNodes) { this.failedNodes += failedNodes; }
/**
 * Aggregates per-node {@code Coverage} answers for a query fan-out and produces the
 * combined coverage, optionally extrapolating document counts for nodes that never
 * answered (degraded coverage).
 */
class CoverageAggregator {
    // Number of nodes the query was dispatched to (fixed at construction).
    private final int askedNodes;
    // Number of answers received so far (one per add() call).
    private int answeredNodes = 0;
    // Sum of the node counts reported inside each received Coverage.
    private int answeredNodesParticipated = 0;
    // Number of nodes known to have failed; set externally.
    private int failedNodes = 0;
    // Document counters summed across all received answers.
    private long answeredDocs = 0;
    private long answeredActiveDocs = 0;
    private long answeredTargetActiveDocs = 0;
    // Degradation flags, sticky once set by any answer (or setTimedOut()).
    private boolean timedOut = false;
    private boolean degradedByMatchPhase = false;

    CoverageAggregator(int askedNodes) {
        this.askedNodes = askedNodes;
    }

    /** Copy constructor: field-by-field copy, used to adjust a clone without mutating the original. */
    CoverageAggregator(CoverageAggregator rhs) {
        askedNodes = rhs.askedNodes;
        answeredNodes = rhs.answeredNodes;
        answeredNodesParticipated = rhs.answeredNodesParticipated;
        failedNodes = rhs.failedNodes;
        answeredDocs = rhs.answeredDocs;
        answeredActiveDocs = rhs.answeredActiveDocs;
        answeredTargetActiveDocs = rhs.answeredTargetActiveDocs;
        timedOut = rhs.timedOut;
        degradedByMatchPhase = rhs.degradedByMatchPhase;
    }

    /** Folds one node's coverage answer into the aggregate; degradation flags accumulate with OR. */
    void add(Coverage source) {
        answeredDocs += source.getDocs();
        answeredActiveDocs += source.getActive();
        answeredTargetActiveDocs += source.getTargetActive();
        answeredNodesParticipated += source.getNodes();
        answeredNodes++;
        degradedByMatchPhase |= source.isDegradedByMatchPhase();
        timedOut |= source.isDegradedByTimeout();
    }

    public int getAskedNodes() {
        return askedNodes;
    }

    // NOTE: name keeps the existing "Answerd" misspelling for API compatibility with callers.
    public int getAnswerdNodes() {
        return answeredNodes;
    }

    /** True if not a single node has answered yet. */
    public boolean hasNoAnswers() {
        return answeredNodes == 0;
    }

    public void setTimedOut() {
        timedOut = true;
    }

    /** Builds the final Coverage object, encoding any degradation reasons as a bit mask. */
    public Coverage createCoverage(TimeoutHandler timeoutHandler) {
        Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1);
        coverage.setNodesTried(askedNodes);
        coverage.setTargetActive(answeredTargetActiveDocs);
        int degradedReason = 0;
        if (timedOut) {
            // The handler decides which timeout flavor (e.g. adaptive) to report.
            degradedReason |= timeoutHandler.reason();
        }
        if (degradedByMatchPhase) {
            degradedReason |= DEGRADED_BY_MATCH_PHASE;
        }
        coverage.setDegradedReason(degradedReason);
        return coverage;
    }

    /**
     * Returns an aggregator whose document counts are extrapolated to compensate for
     * nodes that did not answer, or {@code this} unchanged when every asked/failed node
     * is accounted for. Extrapolation scales the missing nodes by the average documents
     * per answered node (integer arithmetic, so the estimate rounds down).
     * NOTE(review): the {@code searchableCopies - 1} adjustment presumably accounts for
     * redundant copies already covered by other groups — confirm with the dispatch policy.
     */
    public CoverageAggregator adjustDegradedCoverage(int searchableCopies, TimeoutHandler timeoutHandler) {
        int askedAndFailed = askedNodes + failedNodes;
        if (askedAndFailed == answeredNodesParticipated) {
            return this;
        }
        int notAnswered = askedAndFailed - answeredNodesParticipated;
        if ((timeoutHandler.reason() == DEGRADED_BY_ADAPTIVE_TIMEOUT) && answeredNodesParticipated > 0) {
            // Adaptive timeout: extrapolate for every non-answering node.
            CoverageAggregator clone = new CoverageAggregator(this);
            clone.answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated);
            clone.answeredTargetActiveDocs += (notAnswered * answeredTargetActiveDocs / answeredNodesParticipated);
            return clone;
        } else {
            if (askedAndFailed > answeredNodesParticipated) {
                // Non-adaptive case: only nodes beyond the redundant copies are truly missing.
                int missingNodes = notAnswered - (searchableCopies - 1);
                if (answeredNodesParticipated > 0) {
                    CoverageAggregator clone = new CoverageAggregator(this);
                    clone.answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated);
                    clone.answeredTargetActiveDocs += (missingNodes * answeredTargetActiveDocs / answeredNodesParticipated);
                    // Missing answers in this path are treated as a timeout degradation.
                    clone.timedOut = true;
                    return clone;
                }
            }
        }
        return this;
    }
}
class CoverageAggregator { private final int askedNodes; private int answeredNodes = 0; private int answeredNodesParticipated = 0; private int failedNodes = 0; private long answeredDocs = 0; private long answeredActiveDocs = 0; private long answeredTargetActiveDocs = 0; private boolean timedOut = false; private boolean degradedByMatchPhase = false; CoverageAggregator(int askedNodes) { this.askedNodes = askedNodes; } CoverageAggregator(CoverageAggregator rhs) { askedNodes = rhs.askedNodes; answeredNodes = rhs.answeredNodes; answeredNodesParticipated = rhs.answeredNodesParticipated; failedNodes = rhs.failedNodes; answeredDocs = rhs.answeredDocs; answeredActiveDocs = rhs.answeredActiveDocs; answeredTargetActiveDocs = rhs.answeredTargetActiveDocs; timedOut = rhs.timedOut; degradedByMatchPhase = rhs.degradedByMatchPhase; } void add(Coverage source) { answeredDocs += source.getDocs(); answeredActiveDocs += source.getActive(); answeredTargetActiveDocs += source.getTargetActive(); answeredNodesParticipated += source.getNodes(); answeredNodes++; degradedByMatchPhase |= source.isDegradedByMatchPhase(); timedOut |= source.isDegradedByTimeout(); } public int getAskedNodes() { return askedNodes; } public int getAnswerdNodes() { return answeredNodes; } public boolean hasNoAnswers() { return answeredNodes == 0; } public void setTimedOut() { timedOut = true; } public Coverage createCoverage(TimeoutHandler timeoutHandler) { Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1); coverage.setNodesTried(askedNodes); coverage.setTargetActive(answeredTargetActiveDocs); int degradedReason = 0; if (timedOut) { degradedReason |= timeoutHandler.reason(); } if (degradedByMatchPhase) { degradedReason |= DEGRADED_BY_MATCH_PHASE; } coverage.setDegradedReason(degradedReason); return coverage; } public CoverageAggregator adjustDegradedCoverage(int searchableCopies, TimeoutHandler timeoutHandler) { int askedAndFailed = askedNodes + failedNodes; if 
(askedAndFailed == answeredNodesParticipated) { return this; } int notAnswered = askedAndFailed - answeredNodesParticipated; if ((timeoutHandler.reason() == DEGRADED_BY_ADAPTIVE_TIMEOUT) && answeredNodesParticipated > 0) { CoverageAggregator clone = new CoverageAggregator(this); clone.answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated); clone.answeredTargetActiveDocs += (notAnswered * answeredTargetActiveDocs / answeredNodesParticipated); return clone; } else { if (askedAndFailed > answeredNodesParticipated) { int missingNodes = notAnswered - (searchableCopies - 1); if (answeredNodesParticipated > 0) { CoverageAggregator clone = new CoverageAggregator(this); clone.answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated); clone.answeredTargetActiveDocs += (missingNodes * answeredTargetActiveDocs / answeredNodesParticipated); clone.timedOut = true; return clone; } } } return this; } }
= it is
public void setFailedNodes(int failedNodes) { this.failedNodes += failedNodes; }
this.failedNodes += failedNodes;
public void setFailedNodes(int failedNodes) { this.failedNodes += failedNodes; }
class CoverageAggregator { private final int askedNodes; private int answeredNodes = 0; private int answeredNodesParticipated = 0; private int failedNodes = 0; private long answeredDocs = 0; private long answeredActiveDocs = 0; private long answeredTargetActiveDocs = 0; private boolean timedOut = false; private boolean degradedByMatchPhase = false; CoverageAggregator(int askedNodes) { this.askedNodes = askedNodes; } CoverageAggregator(CoverageAggregator rhs) { askedNodes = rhs.askedNodes; answeredNodes = rhs.answeredNodes; answeredNodesParticipated = rhs.answeredNodesParticipated; failedNodes = rhs.failedNodes; answeredDocs = rhs.answeredDocs; answeredActiveDocs = rhs.answeredActiveDocs; answeredTargetActiveDocs = rhs.answeredTargetActiveDocs; timedOut = rhs.timedOut; degradedByMatchPhase = rhs.degradedByMatchPhase; } void add(Coverage source) { answeredDocs += source.getDocs(); answeredActiveDocs += source.getActive(); answeredTargetActiveDocs += source.getTargetActive(); answeredNodesParticipated += source.getNodes(); answeredNodes++; degradedByMatchPhase |= source.isDegradedByMatchPhase(); timedOut |= source.isDegradedByTimeout(); } public int getAskedNodes() { return askedNodes; } public int getAnswerdNodes() { return answeredNodes; } public boolean hasNoAnswers() { return answeredNodes == 0; } public void setTimedOut() { timedOut = true; } public Coverage createCoverage(TimeoutHandler timeoutHandler) { Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1); coverage.setNodesTried(askedNodes); coverage.setTargetActive(answeredTargetActiveDocs); int degradedReason = 0; if (timedOut) { degradedReason |= timeoutHandler.reason(); } if (degradedByMatchPhase) { degradedReason |= DEGRADED_BY_MATCH_PHASE; } coverage.setDegradedReason(degradedReason); return coverage; } public CoverageAggregator adjustDegradedCoverage(int searchableCopies, TimeoutHandler timeoutHandler) { int askedAndFailed = askedNodes + failedNodes; if 
(askedAndFailed == answeredNodesParticipated) { return this; } int notAnswered = askedAndFailed - answeredNodesParticipated; if ((timeoutHandler.reason() == DEGRADED_BY_ADAPTIVE_TIMEOUT) && answeredNodesParticipated > 0) { CoverageAggregator clone = new CoverageAggregator(this); clone.answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated); clone.answeredTargetActiveDocs += (notAnswered * answeredTargetActiveDocs / answeredNodesParticipated); return clone; } else { if (askedAndFailed > answeredNodesParticipated) { int missingNodes = notAnswered - (searchableCopies - 1); if (answeredNodesParticipated > 0) { CoverageAggregator clone = new CoverageAggregator(this); clone.answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated); clone.answeredTargetActiveDocs += (missingNodes * answeredTargetActiveDocs / answeredNodesParticipated); clone.timedOut = true; return clone; } } } return this; } }
class CoverageAggregator { private final int askedNodes; private int answeredNodes = 0; private int answeredNodesParticipated = 0; private int failedNodes = 0; private long answeredDocs = 0; private long answeredActiveDocs = 0; private long answeredTargetActiveDocs = 0; private boolean timedOut = false; private boolean degradedByMatchPhase = false; CoverageAggregator(int askedNodes) { this.askedNodes = askedNodes; } CoverageAggregator(CoverageAggregator rhs) { askedNodes = rhs.askedNodes; answeredNodes = rhs.answeredNodes; answeredNodesParticipated = rhs.answeredNodesParticipated; failedNodes = rhs.failedNodes; answeredDocs = rhs.answeredDocs; answeredActiveDocs = rhs.answeredActiveDocs; answeredTargetActiveDocs = rhs.answeredTargetActiveDocs; timedOut = rhs.timedOut; degradedByMatchPhase = rhs.degradedByMatchPhase; } void add(Coverage source) { answeredDocs += source.getDocs(); answeredActiveDocs += source.getActive(); answeredTargetActiveDocs += source.getTargetActive(); answeredNodesParticipated += source.getNodes(); answeredNodes++; degradedByMatchPhase |= source.isDegradedByMatchPhase(); timedOut |= source.isDegradedByTimeout(); } public int getAskedNodes() { return askedNodes; } public int getAnswerdNodes() { return answeredNodes; } public boolean hasNoAnswers() { return answeredNodes == 0; } public void setTimedOut() { timedOut = true; } public Coverage createCoverage(TimeoutHandler timeoutHandler) { Coverage coverage = new Coverage(answeredDocs, answeredActiveDocs, answeredNodesParticipated, 1); coverage.setNodesTried(askedNodes); coverage.setTargetActive(answeredTargetActiveDocs); int degradedReason = 0; if (timedOut) { degradedReason |= timeoutHandler.reason(); } if (degradedByMatchPhase) { degradedReason |= DEGRADED_BY_MATCH_PHASE; } coverage.setDegradedReason(degradedReason); return coverage; } public CoverageAggregator adjustDegradedCoverage(int searchableCopies, TimeoutHandler timeoutHandler) { int askedAndFailed = askedNodes + failedNodes; if 
(askedAndFailed == answeredNodesParticipated) { return this; } int notAnswered = askedAndFailed - answeredNodesParticipated; if ((timeoutHandler.reason() == DEGRADED_BY_ADAPTIVE_TIMEOUT) && answeredNodesParticipated > 0) { CoverageAggregator clone = new CoverageAggregator(this); clone.answeredActiveDocs += (notAnswered * answeredActiveDocs / answeredNodesParticipated); clone.answeredTargetActiveDocs += (notAnswered * answeredTargetActiveDocs / answeredNodesParticipated); return clone; } else { if (askedAndFailed > answeredNodesParticipated) { int missingNodes = notAnswered - (searchableCopies - 1); if (answeredNodesParticipated > 0) { CoverageAggregator clone = new CoverageAggregator(this); clone.answeredActiveDocs += (missingNodes * answeredActiveDocs / answeredNodesParticipated); clone.answeredTargetActiveDocs += (missingNodes * answeredTargetActiveDocs / answeredNodesParticipated); clone.timedOut = true; return clone; } } } return this; } }
```suggestion return "application instance '" + id.ToFullString() + "'"; ```
public String toString() { return "application instance '" + id + "'"; }
return "application instance '" + id + "'";
public String toString() { return "application instance '" + id.toFullString() + "'"; }
class Instance { private final ApplicationId id; private final Map<ZoneId, Deployment> deployments; private final List<AssignedRotation> rotations; private final RotationStatus rotationStatus; private final Map<JobType, Instant> jobPauses; private final Change change; /** Creates an empty instance */ public Instance(ApplicationId id) { this(id, Set.of(), Map.of(), List.of(), RotationStatus.EMPTY, Change.empty()); } /** Creates an empty instance*/ public Instance(ApplicationId id, Collection<Deployment> deployments, Map<JobType, Instant> jobPauses, List<AssignedRotation> rotations, RotationStatus rotationStatus, Change change) { this.id = Objects.requireNonNull(id, "id cannot be null"); this.deployments = Objects.requireNonNull(deployments, "deployments cannot be null").stream() .collect(Collectors.toUnmodifiableMap(Deployment::zone, Function.identity())); this.jobPauses = Map.copyOf(Objects.requireNonNull(jobPauses, "deploymentJobs cannot be null")); this.rotations = List.copyOf(Objects.requireNonNull(rotations, "rotations cannot be null")); this.rotationStatus = Objects.requireNonNull(rotationStatus, "rotationStatus cannot be null"); this.change = Objects.requireNonNull(change, "change cannot be null"); } public Instance withNewDeployment(ZoneId zone, RevisionId revision, Version version, Instant instant, Map<DeploymentMetrics.Warning, Integer> warnings, QuotaUsage quotaUsage) { Deployment previousDeployment = deployments.getOrDefault(zone, new Deployment(zone, revision, version, instant, DeploymentMetrics.none, DeploymentActivity.none, QuotaUsage.none, OptionalDouble.empty())); Deployment newDeployment = new Deployment(zone, revision, version, instant, previousDeployment.metrics().with(warnings), previousDeployment.activity(), quotaUsage, previousDeployment.cost()); return with(newDeployment); } public Instance withJobPause(JobType jobType, OptionalLong pausedUntil) { Map<JobType, Instant> jobPauses = new HashMap<>(this.jobPauses); if (pausedUntil.isPresent()) 
jobPauses.put(jobType, Instant.ofEpochMilli(pausedUntil.getAsLong())); else jobPauses.remove(jobType); return new Instance(id, deployments.values(), jobPauses, rotations, rotationStatus, change); } public Instance recordActivityAt(Instant instant, ZoneId zone) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.recordActivityAt(instant)); } public Instance with(ZoneId zone, DeploymentMetrics deploymentMetrics) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withMetrics(deploymentMetrics)); } public Instance withDeploymentCosts(Map<ZoneId, Double> costByZone) { Map<ZoneId, Deployment> deployments = this.deployments.entrySet().stream() .map(entry -> Optional.ofNullable(costByZone.get(entry.getKey())) .map(entry.getValue()::withCost) .orElseGet(entry.getValue()::withoutCost)) .collect(Collectors.toUnmodifiableMap(Deployment::zone, deployment -> deployment)); return with(deployments); } public Instance withoutDeploymentIn(ZoneId zone) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.remove(zone); return with(deployments); } public Instance with(List<AssignedRotation> assignedRotations) { return new Instance(id, deployments.values(), jobPauses, assignedRotations, rotationStatus, change); } public Instance with(RotationStatus rotationStatus) { return new Instance(id, deployments.values(), jobPauses, rotations, rotationStatus, change); } public Instance withChange(Change change) { return new Instance(id, deployments.values(), jobPauses, rotations, rotationStatus, change); } private Instance with(Deployment deployment) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.put(deployment.zone(), deployment); return with(deployments); } private Instance with(Map<ZoneId, Deployment> deployments) { return new Instance(id, deployments.values(), jobPauses, rotations, rotationStatus, 
change); } public ApplicationId id() { return id; } public InstanceName name() { return id.instance(); } /** Returns an immutable map of the current deployments of this */ public Map<ZoneId, Deployment> deployments() { return deployments; } /** * Returns an immutable map of the current *production* deployments of this * (deployments also includes manually deployed environments) */ public Map<ZoneId, Deployment> productionDeployments() { return deployments.values().stream() .filter(deployment -> deployment.zone().environment() == Environment.prod) .collect(Collectors.toUnmodifiableMap(Deployment::zone, Function.identity())); } /** Returns the instant until which the given job is paused, or empty. */ public Optional<Instant> jobPause(JobType jobType) { return Optional.ofNullable(jobPauses.get(jobType)); } /** Returns the set of instants until which any paused jobs of this instance should remain paused, indexed by job type. */ public Map<JobType, Instant> jobPauses() { return jobPauses; } /** Returns all rotations assigned to this */ public List<AssignedRotation> rotations() { return rotations; } /** Returns the status of the global rotation(s) assigned to this */ public RotationStatus rotationStatus() { return rotationStatus; } /** Returns the currently deploying change for this instance. 
*/ public Change change() { return change; } /** Returns the total quota usage for this instance, excluding temporary deployments **/ public QuotaUsage quotaUsage() { return deployments.values().stream() .filter(d -> !d.zone().environment().isTest()) .map(Deployment::quota).reduce(QuotaUsage::add).orElse(QuotaUsage.none); } /** Returns the total quota usage for manual deployments for this instance **/ public QuotaUsage manualQuotaUsage() { return deployments.values().stream() .filter(d -> d.zone().environment().isManuallyDeployed()) .map(Deployment::quota).reduce(QuotaUsage::add).orElse(QuotaUsage.none); } /** Returns the total quota usage for this instance, excluding one specific deployment (and temporary deployments) */ public QuotaUsage quotaUsageExcluding(ApplicationId application, ZoneId zone) { return deployments.values().stream() .filter(d -> !d.zone().environment().isTest()) .filter(d -> !(application.equals(id) && d.zone().equals(zone))) .map(Deployment::quota).reduce(QuotaUsage::add).orElse(QuotaUsage.none); } @Override public boolean equals(Object o) { if (this == o) return true; if ( ! (o instanceof Instance)) return false; Instance that = (Instance) o; return id.equals(that.id); } @Override public int hashCode() { return id.hashCode(); } @Override }
class Instance { private final ApplicationId id; private final Map<ZoneId, Deployment> deployments; private final List<AssignedRotation> rotations; private final RotationStatus rotationStatus; private final Map<JobType, Instant> jobPauses; private final Change change; /** Creates an empty instance */ public Instance(ApplicationId id) { this(id, Set.of(), Map.of(), List.of(), RotationStatus.EMPTY, Change.empty()); } /** Creates an empty instance*/ public Instance(ApplicationId id, Collection<Deployment> deployments, Map<JobType, Instant> jobPauses, List<AssignedRotation> rotations, RotationStatus rotationStatus, Change change) { this.id = Objects.requireNonNull(id, "id cannot be null"); this.deployments = Objects.requireNonNull(deployments, "deployments cannot be null").stream() .collect(Collectors.toUnmodifiableMap(Deployment::zone, Function.identity())); this.jobPauses = Map.copyOf(Objects.requireNonNull(jobPauses, "deploymentJobs cannot be null")); this.rotations = List.copyOf(Objects.requireNonNull(rotations, "rotations cannot be null")); this.rotationStatus = Objects.requireNonNull(rotationStatus, "rotationStatus cannot be null"); this.change = Objects.requireNonNull(change, "change cannot be null"); } public Instance withNewDeployment(ZoneId zone, RevisionId revision, Version version, Instant instant, Map<DeploymentMetrics.Warning, Integer> warnings, QuotaUsage quotaUsage) { Deployment previousDeployment = deployments.getOrDefault(zone, new Deployment(zone, revision, version, instant, DeploymentMetrics.none, DeploymentActivity.none, QuotaUsage.none, OptionalDouble.empty())); Deployment newDeployment = new Deployment(zone, revision, version, instant, previousDeployment.metrics().with(warnings), previousDeployment.activity(), quotaUsage, previousDeployment.cost()); return with(newDeployment); } public Instance withJobPause(JobType jobType, OptionalLong pausedUntil) { Map<JobType, Instant> jobPauses = new HashMap<>(this.jobPauses); if (pausedUntil.isPresent()) 
jobPauses.put(jobType, Instant.ofEpochMilli(pausedUntil.getAsLong())); else jobPauses.remove(jobType); return new Instance(id, deployments.values(), jobPauses, rotations, rotationStatus, change); } public Instance recordActivityAt(Instant instant, ZoneId zone) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.recordActivityAt(instant)); } public Instance with(ZoneId zone, DeploymentMetrics deploymentMetrics) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withMetrics(deploymentMetrics)); } public Instance withDeploymentCosts(Map<ZoneId, Double> costByZone) { Map<ZoneId, Deployment> deployments = this.deployments.entrySet().stream() .map(entry -> Optional.ofNullable(costByZone.get(entry.getKey())) .map(entry.getValue()::withCost) .orElseGet(entry.getValue()::withoutCost)) .collect(Collectors.toUnmodifiableMap(Deployment::zone, deployment -> deployment)); return with(deployments); } public Instance withoutDeploymentIn(ZoneId zone) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.remove(zone); return with(deployments); } public Instance with(List<AssignedRotation> assignedRotations) { return new Instance(id, deployments.values(), jobPauses, assignedRotations, rotationStatus, change); } public Instance with(RotationStatus rotationStatus) { return new Instance(id, deployments.values(), jobPauses, rotations, rotationStatus, change); } public Instance withChange(Change change) { return new Instance(id, deployments.values(), jobPauses, rotations, rotationStatus, change); } private Instance with(Deployment deployment) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.put(deployment.zone(), deployment); return with(deployments); } private Instance with(Map<ZoneId, Deployment> deployments) { return new Instance(id, deployments.values(), jobPauses, rotations, rotationStatus, 
change); } public ApplicationId id() { return id; } public InstanceName name() { return id.instance(); } /** Returns an immutable map of the current deployments of this */ public Map<ZoneId, Deployment> deployments() { return deployments; } /** * Returns an immutable map of the current *production* deployments of this * (deployments also includes manually deployed environments) */ public Map<ZoneId, Deployment> productionDeployments() { return deployments.values().stream() .filter(deployment -> deployment.zone().environment() == Environment.prod) .collect(Collectors.toUnmodifiableMap(Deployment::zone, Function.identity())); } /** Returns the instant until which the given job is paused, or empty. */ public Optional<Instant> jobPause(JobType jobType) { return Optional.ofNullable(jobPauses.get(jobType)); } /** Returns the set of instants until which any paused jobs of this instance should remain paused, indexed by job type. */ public Map<JobType, Instant> jobPauses() { return jobPauses; } /** Returns all rotations assigned to this */ public List<AssignedRotation> rotations() { return rotations; } /** Returns the status of the global rotation(s) assigned to this */ public RotationStatus rotationStatus() { return rotationStatus; } /** Returns the currently deploying change for this instance. 
*/ public Change change() { return change; } /** Returns the total quota usage for this instance, excluding temporary deployments **/ public QuotaUsage quotaUsage() { return deployments.values().stream() .filter(d -> !d.zone().environment().isTest()) .map(Deployment::quota).reduce(QuotaUsage::add).orElse(QuotaUsage.none); } /** Returns the total quota usage for manual deployments for this instance **/ public QuotaUsage manualQuotaUsage() { return deployments.values().stream() .filter(d -> d.zone().environment().isManuallyDeployed()) .map(Deployment::quota).reduce(QuotaUsage::add).orElse(QuotaUsage.none); } /** Returns the total quota usage for this instance, excluding one specific deployment (and temporary deployments) */ public QuotaUsage quotaUsageExcluding(ApplicationId application, ZoneId zone) { return deployments.values().stream() .filter(d -> !d.zone().environment().isTest()) .filter(d -> !(application.equals(id) && d.zone().equals(zone))) .map(Deployment::quota).reduce(QuotaUsage::add).orElse(QuotaUsage.none); } @Override public boolean equals(Object o) { if (this == o) return true; if ( ! (o instanceof Instance)) return false; Instance that = (Instance) o; return id.equals(that.id); } @Override public int hashCode() { return id.hashCode(); } @Override }
```suggestion assertEquals("deployment-removal: application instance 'tenant.application.default' is deployed in us-west-1, " + ```
void testDeployment() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .explicitEnvironment(Environment.dev, Environment.perf) .region("us-west-1") .region("us-east-3") .build(); Version version1 = tester.configServer().initialVersion(); var context = tester.newDeploymentContext(); context.submit(applicationPackage); assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)), context.application().revisions().get(context.instance().change().revision().get()), "Application version is known from completion of initial job"); context.runJob(systemTest); context.runJob(stagingTest); RevisionId applicationVersion = context.instance().change().revision().get(); assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment"); tester.triggerJobs(); tester.clock().advance(Duration.ofSeconds(1)); context.timeOutUpgrade(productionUsWest1); assertEquals(4, context.instanceJobs().size()); tester.triggerJobs(); tester.controllerTester().createNewController(); assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1"))); assertNotNull(tester.controller().applications().requireInstance(context.instanceId())); context.submit(applicationPackage); context.runJob(systemTest); context.runJob(stagingTest); context.triggerJobs().jobAborted(productionUsWest1); context.runJob(productionUsWest1); tester.triggerJobs(); context.runJob(productionUsEast3); assertEquals(4, context.instanceJobs().size()); applicationPackage = new ApplicationPackageBuilder() .instances("hellO") .build(); try { context.submit(applicationPackage); fail("Expected exception due to illegal deployment spec."); } catch (IllegalArgumentException e) { assertEquals("Invalid id 'hellO'. 
Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage()); } applicationPackage = new ApplicationPackageBuilder() .region("deep-space-9") .build(); try { context.submit(applicationPackage); fail("Expected exception due to illegal deployment spec."); } catch (IllegalArgumentException e) { assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage()); } applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .build(); try { assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1"))); context.submit(applicationPackage); fail("Expected exception due to illegal production deployment removal"); } catch (IllegalArgumentException e) { assertEquals("deployment-removal: application instance 'tenant.application' is deployed in us-west-1, " + "but this instance and region combination is removed from deployment.xml. 
" + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval), e.getMessage()); } assertNotNull(context.instance().deployments().get(productionUsWest1.zone()), "Zone was not removed"); applicationPackage = new ApplicationPackageBuilder() .allow(ValidationId.deploymentRemoval) .upgradePolicy("default") .region("us-east-3") .build(); context.submit(applicationPackage); assertNull(context.instance().deployments().get(productionUsWest1.zone()), "Zone was removed"); assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed"); assertNotNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.instanceId()) .get(tester.clock().instant())); tester.clock().advance(Duration.ofSeconds(1)); context.submit(ApplicationPackage.deploymentRemoval()); tester.clock().advance(Duration.ofSeconds(1)); context.submit(ApplicationPackage.deploymentRemoval()); tester.applications().deleteApplication(context.application().id(), tester.controllerTester().credentialsFor(context.instanceId().tenant())); assertArrayEquals(new byte[0], tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.instanceId()) .get(tester.clock().instant())); assertNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.deploymentIdIn(productionUsWest1.zone()))); }
assertEquals("deployment-removal: application instance 'tenant.application' is deployed in us-west-1, " +
void testDeployment() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .explicitEnvironment(Environment.dev, Environment.perf) .region("us-west-1") .region("us-east-3") .build(); Version version1 = tester.configServer().initialVersion(); var context = tester.newDeploymentContext(); context.submit(applicationPackage); assertEquals(ApplicationVersion.from(RevisionId.forProduction(1), DeploymentContext.defaultSourceRevision, "a@b", new Version("6.1"), Instant.ofEpochSecond(1)), context.application().revisions().get(context.instance().change().revision().get()), "Application version is known from completion of initial job"); context.runJob(systemTest); context.runJob(stagingTest); RevisionId applicationVersion = context.instance().change().revision().get(); assertTrue(applicationVersion.isProduction(), "Application version has been set during deployment"); tester.triggerJobs(); tester.clock().advance(Duration.ofSeconds(1)); context.timeOutUpgrade(productionUsWest1); assertEquals(4, context.instanceJobs().size()); tester.triggerJobs(); tester.controllerTester().createNewController(); assertNotNull(tester.controller().tenants().get(TenantName.from("tenant1"))); assertNotNull(tester.controller().applications().requireInstance(context.instanceId())); context.submit(applicationPackage); context.runJob(systemTest); context.runJob(stagingTest); context.triggerJobs().jobAborted(productionUsWest1); context.runJob(productionUsWest1); tester.triggerJobs(); context.runJob(productionUsEast3); assertEquals(4, context.instanceJobs().size()); applicationPackage = new ApplicationPackageBuilder() .instances("hellO") .build(); try { context.submit(applicationPackage); fail("Expected exception due to illegal deployment spec."); } catch (IllegalArgumentException e) { assertEquals("Invalid id 'hellO'. 
Tenant, application and instance names must start with a letter, may contain no more than 20 characters, and may only contain lowercase letters, digits or dashes, but no double-dashes.", e.getMessage()); } applicationPackage = new ApplicationPackageBuilder() .region("deep-space-9") .build(); try { context.submit(applicationPackage); fail("Expected exception due to illegal deployment spec."); } catch (IllegalArgumentException e) { assertEquals("Zone prod.deep-space-9 in deployment spec was not found in this system!", e.getMessage()); } applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .build(); try { assertTrue(context.instance().deployments().containsKey(ZoneId.from("prod", "us-west-1"))); context.submit(applicationPackage); fail("Expected exception due to illegal production deployment removal"); } catch (IllegalArgumentException e) { assertEquals("deployment-removal: application instance 'tenant.application.default' is deployed in us-west-1, " + "but this instance and region combination is removed from deployment.xml. 
" + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval), e.getMessage()); } assertNotNull(context.instance().deployments().get(productionUsWest1.zone()), "Zone was not removed"); applicationPackage = new ApplicationPackageBuilder() .allow(ValidationId.deploymentRemoval) .upgradePolicy("default") .region("us-east-3") .build(); context.submit(applicationPackage); assertNull(context.instance().deployments().get(productionUsWest1.zone()), "Zone was removed"); assertNull(context.instanceJobs().get(productionUsWest1), "Deployment job was removed"); assertNotNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.instanceId()) .get(tester.clock().instant())); tester.clock().advance(Duration.ofSeconds(1)); context.submit(ApplicationPackage.deploymentRemoval()); tester.clock().advance(Duration.ofSeconds(1)); context.submit(ApplicationPackage.deploymentRemoval()); tester.applications().deleteApplication(context.application().id(), tester.controllerTester().credentialsFor(context.instanceId().tenant())); assertArrayEquals(new byte[0], tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.instanceId()) .get(tester.clock().instant())); assertNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(context.deploymentIdIn(productionUsWest1.zone()))); }
/**
 * Integration-style tests for the controller's application/deployment handling, exercised
 * through {@code DeploymentTester} mocks: global/application endpoints and their DNS records,
 * rotation assignment, dev deployments, certificates, and deployment-spec validation errors.
 *
 * NOTE(review): this snapshot was recovered from a line-collapsed extraction; indentation has
 * been reconstructed and a few string literals containing URLs were truncated by the extractor
 * (everything after "https:" was stripped, presumably because "//" was treated as a comment).
 * Those spots are marked below and must be restored from version control.
 */
class ControllerTest {

    private final DeploymentTester tester = new DeploymentTester();

    // NOTE(review): duplicated @Test annotation below — @Test is not repeatable, so this does
    // not compile as-is; most likely a method between the two annotations was lost in the
    // extraction. Confirm against VCS.
    @Test
    @Test
    // Toggling routing status OUT for one deployment must not affect the other zone.
    void testGlobalRotationStatus() {
        var context = tester.newDeploymentContext();
        var zone1 = ZoneId.from("prod", "us-west-1");
        var zone2 = ZoneId.from("prod", "us-east-3");
        var applicationPackage = new ApplicationPackageBuilder()
                .region(zone1.region())
                .region(zone2.region())
                .endpoint("default", "default", zone1.region().value(), zone2.region().value())
                .build();
        context.submit(applicationPackage).deploy();

        // Default status is "in"; set zone1 out via the routing context.
        var deployment1 = context.deploymentIdIn(zone1);
        DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1);
        RoutingStatus status1 = routingContext.routingStatus();
        assertEquals(RoutingStatus.Value.in, status1.value());
        routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator);
        RoutingStatus status2 = routingContext.routingStatus();
        assertEquals(RoutingStatus.Value.out, status2.value());
        // zone2 is untouched and stays "in".
        RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus();
        assertEquals(RoutingStatus.Value.in, status3.value());
    }

    @Test
    // Each instance of a multi-instance application gets its own rotation and CNAME record.
    void testDnsUpdatesForGlobalEndpoint() {
        var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta");
        var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default");
        ZoneId usWest = ZoneId.from("prod.us-west-1");
        ZoneId usCentral = ZoneId.from("prod.us-central-1");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .instances("beta,default")
                .endpoint("default", "foo")
                .region(usWest.region())
                .region(usCentral.region())
                .build();
        tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)),
                                                                  RoutingMethod.sharedLayer4);
        betaContext.submit(applicationPackage).deploy();

        { // beta instance: rotation-id-01 is passed to the config server in every prod zone.
            Collection<Deployment> betaDeployments = betaContext.instance().deployments().values();
            assertFalse(betaDeployments.isEmpty());
            Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", "global",
                                                                                     List.of("beta.app1.tenant1.global.vespa.oath.cloud",
                                                                                             "rotation-id-01"),
                                                                                     OptionalInt.empty(),
                                                                                     RoutingMethod.sharedLayer4));
            for (Deployment deployment : betaDeployments) {
                assertEquals(containerEndpoints, tester.configServer().containerEndpoints()
                                                       .get(betaContext.deploymentIdIn(deployment.zone())));
            }
            betaContext.flushDnsUpdates();
        }

        { // default instance: gets its own rotation, rotation-id-02.
            Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values();
            assertFalse(defaultDeployments.isEmpty());
            Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", "global",
                                                                                     List.of("app1.tenant1.global.vespa.oath.cloud",
                                                                                             "rotation-id-02"),
                                                                                     OptionalInt.empty(),
                                                                                     RoutingMethod.sharedLayer4));
            for (Deployment deployment : defaultDeployments) {
                assertEquals(containerEndpoints, tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone())));
            }
            defaultContext.flushDnsUpdates();
        }

        // One CNAME per instance, pointing at that instance's rotation FQDN.
        Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.",
                                                    "app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02.");
        rotationCnames.forEach((cname, data) -> {
            var record = tester.controllerTester().findCname(cname);
            assertTrue(record.isPresent());
            assertEquals(cname, record.get().name().asString());
            assertEquals(data, record.get().data().asString());
        });

        // Declared global endpoints per instance match the expected DNS names.
        Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"),
                                                                          defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud"));
        globalDnsNamesByInstance.forEach((instance, dnsNames) -> {
            Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance)
                                               .scope(Endpoint.Scope.global)
                                               .asList().stream()
                                               .map(Endpoint::dnsName)
                                               .collect(Collectors.toSet());
            assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance);
        });
    }

    @Test
    // Legacy global-service-id syntax still produces a rotation and a single CNAME.
    void testDnsUpdatesForGlobalEndpointLegacySyntax() {
        var context = tester.newDeploymentContext("tenant1", "app1", "default");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .globalServiceId("foo")
                .region("us-west-1")
                .region("us-central-1")
                .build();
        context.submit(applicationPackage).deploy();

        Collection<Deployment> deployments = context.instance().deployments().values();
        assertFalse(deployments.isEmpty());
        for (Deployment deployment : deployments) {
            assertEquals(Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                         tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
                         "Rotation names are passed to config server in " + deployment.zone());
        }
        context.flushDnsUpdates();

        assertEquals(1, tester.controllerTester().nameService().records().size());
        Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
        assertTrue(record.isPresent());
        assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString());
        assertEquals("rotation-fqdn-01.", record.get().data().asString());

        List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId())
                                            .scope(Endpoint.Scope.global)
                                            .sortedBy(Comparator.comparing(Endpoint::dnsName))
                                            .mapToList(Endpoint::dnsName);
        assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"), globalDnsNames);
    }

    @Test
    // Four endpoints => four rotations; zones only receive the endpoints they are members of.
    void testDnsUpdatesForMultipleGlobalEndpoints() {
        var context = tester.newDeploymentContext("tenant1", "app1", "default");
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .endpoint("foobar", "qrs", "us-west-1", "us-central-1")
                .endpoint("default", "qrs", "us-west-1", "us-central-1")
                .endpoint("all", "qrs")
                .endpoint("west", "qrs", "us-west-1")
                .region("us-west-1")
                .region("us-central-1")
                .build();
        context.submit(applicationPackage).deploy();

        Collection<Deployment> deployments = context.instance().deployments().values();
        assertFalse(deployments.isEmpty());

        // The "west" endpoint is only visible in us-west-1.
        var notWest = Set.of(
                "rotation-id-01", "foobar.app1.tenant1.global.vespa.oath.cloud",
                "rotation-id-02", "app1.tenant1.global.vespa.oath.cloud",
                "rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud"
        );
        var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud"));
        for (Deployment deployment : deployments) {
            assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? west : notWest,
                         tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())),
                         "Rotation names are passed to config server in " + deployment.zone());
        }
        context.flushDnsUpdates();

        assertEquals(4, tester.controllerTester().nameService().records().size());
        var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud");
        assertTrue(record1.isPresent());
        assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString());
        assertEquals("rotation-fqdn-02.", record1.get().data().asString());

        var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud");
        assertTrue(record2.isPresent());
        assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString());
        assertEquals("rotation-fqdn-01.", record2.get().data().asString());

        var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud");
        assertTrue(record3.isPresent());
        assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString());
        assertEquals("rotation-fqdn-03.", record3.get().data().asString());

        var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud");
        assertTrue(record4.isPresent());
        assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString());
        assertEquals("rotation-fqdn-04.", record4.get().data().asString());
    }

    @Test
    // Growing endpoint membership is fine; shrinking it requires a validation override.
    void testDnsUpdatesForGlobalEndpointChanges() {
        var context = tester.newDeploymentContext("tenant1", "app1", "default");
        var west = ZoneId.from("prod", "us-west-1");
        var central = ZoneId.from("prod", "us-central-1");
        var east = ZoneId.from("prod", "us-east-3");

        // Initial endpoint over {west, central}.
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "qrs", west.region().value(), central.region().value())
                .region(west.region().value())
                .region(central.region().value())
                .region(east.region().value())
                .build();
        context.submit(applicationPackage).deploy();
        for (var zone : List.of(west, central)) {
            assertEquals(
                    Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                    tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
                    "Zone " + zone + " is a member of global endpoint");
        }

        // Adding a new endpoint ("east") is allowed without an override.
        ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
                .endpoint("default", "qrs", west.region().value(), central.region().value())
                .endpoint("east", "qrs", east.region().value())
                .region(west.region().value())
                .region(central.region().value())
                .region(east.region().value())
                .build();
        context.submit(applicationPackage2).deploy();
        for (var zone : List.of(west, central)) {
            assertEquals(
                    Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                    tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
                    "Zone " + zone + " is a member of global endpoint");
        }
        assertEquals(
                Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"),
                tester.configServer().containerEndpointNames(context.deploymentIdIn(east)),
                "Zone " + east + " is a member of global endpoint");

        // Extending "default" to also cover east is likewise allowed.
        ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder()
                .endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value())
                .endpoint("east", "qrs", east.region().value())
                .region(west.region().value())
                .region(central.region().value())
                .region(east.region().value())
                .build();
        context.submit(applicationPackage3).deploy();
        for (var zone : List.of(west, central, east)) {
            assertEquals(
                    zone.equals(east) ? Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud",
                                               "rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud")
                                      : Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"),
                    tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
                    "Zone " + zone + " is a member of global endpoint");
        }

        // Shrinking "default" back again is rejected without an override.
        ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder()
                .endpoint("default", "qrs", west.region().value(), central.region().value())
                .endpoint("east", "qrs", east.region().value())
                .region(west.region().value())
                .region(central.region().value())
                .region(east.region().value())
                .build();
        try {
            context.submit(applicationPackage4);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
                         "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
                         "but does not include all of these in deployment.xml. Deploying given deployment.xml " +
                         "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " +
                         "and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " +
                         ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange),
                         e.getMessage());
        }

        // Removing "default" entirely is also rejected without an override.
        ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder()
                .endpoint("east", "qrs", east.region().value())
                .region(west.region().value())
                .region(central.region().value())
                .region(east.region().value())
                .build();
        try {
            context.submit(applicationPackage5);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " +
                         "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " +
                         "but does not include all of these in deployment.xml. Deploying given deployment.xml " +
                         "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " +
                         ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange),
                         e.getMessage());
        }

        // With the override the removal is accepted.
        ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder()
                .endpoint("east", "qrs", east.region().value())
                .region(west.region().value())
                .region(central.region().value())
                .region(east.region().value())
                .allow(ValidationId.globalEndpointChange)
                .build();
        context.submit(applicationPackage6);
    }

    @Test
    // Removing all endpoints (with override) clears rotations and container endpoints.
    void testUnassignRotations() {
        var context = tester.newDeploymentContext();
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "qrs", "us-west-1", "us-central-1")
                .region("us-west-1")
                .region("us-central-1")
                .build();
        context.submit(applicationPackage).deploy();

        ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder()
                .region("us-west-1")
                .region("us-central-1")
                .allow(ValidationId.globalEndpointChange)
                .build();
        context.submit(applicationPackage2).deploy();

        assertEquals(List.of(), context.instance().rotations());
        assertEquals(
                Set.of(),
                tester.configServer().containerEndpoints().get(context.deploymentIdIn(ZoneId.from("prod", "us-west-1")))
        );
    }

    @Test
    // A released rotation is reused by the next application; DNS reflects the reassignment.
    void testDnsUpdatesWithChangeInRotationAssignment() {
        String dnsName1 = "app1.tenant1.global.vespa.oath.cloud";
        { // Deploy app1, then delete it, releasing rotation-id-01 and its CNAME.
            var context = tester.newDeploymentContext("tenant1", "app1", "default");
            ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                    .endpoint("default", "foo")
                    .region("us-west-1")
                    .region("us-central-1")
                    .build();
            context.submit(applicationPackage).deploy();
            assertEquals(1, tester.controllerTester().nameService().records().size());
            {
                Optional<Record> record = tester.controllerTester().findCname(dnsName1);
                assertTrue(record.isPresent());
                assertEquals(dnsName1, record.get().name().asString());
                assertEquals("rotation-fqdn-01.", record.get().data().asString());
            }
            applicationPackage = new ApplicationPackageBuilder()
                    .allow(ValidationId.deploymentRemoval)
                    .allow(ValidationId.globalEndpointChange)
                    .build();
            context.submit(applicationPackage);
            tester.applications().deleteApplication(context.application().id(),
                                                    tester.controllerTester().credentialsFor(context.application().id().tenant()));
            try (RotationLock lock = tester.controller().routing().rotations().lock()) {
                assertTrue(tester.controller().routing().rotations().availableRotations(lock)
                                 .containsKey(new RotationId("rotation-id-01")),
                           "Rotation is unassigned");
            }
            context.flushDnsUpdates();
            Optional<Record> record = tester.controllerTester().findCname(dnsName1);
            assertTrue(record.isEmpty(), dnsName1 + " is removed");
        }

        String dnsName2 = "app2.tenant2.global.vespa.oath.cloud";
        { // app2 picks up the freed rotation-id-01.
            var context = tester.newDeploymentContext("tenant2", "app2", "default");
            ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                    .endpoint("default", "foo")
                    .region("us-west-1")
                    .region("us-central-1")
                    .build();
            context.submit(applicationPackage).deploy();
            assertEquals(1, tester.controllerTester().nameService().records().size());
            var record = tester.controllerTester().findCname(dnsName2);
            assertTrue(record.isPresent());
            assertEquals(dnsName2, record.get().name().asString());
            assertEquals("rotation-fqdn-01.", record.get().data().asString());
        }

        { // Redeployed app1 gets the next free rotation, rotation-id-02.
            var context = tester.newDeploymentContext("tenant1", "app1", "default");
            ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                    .endpoint("default", "foo")
                    .region("us-west-1")
                    .region("us-central-1")
                    .build();
            context.submit(applicationPackage).deploy();
            assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString());
            assertEquals(2, tester.controllerTester().nameService().records().size());
            var record1 = tester.controllerTester().findCname(dnsName1);
            assertTrue(record1.isPresent());
            assertEquals("rotation-fqdn-02.", record1.get().data().asString());
            var record2 = tester.controllerTester().findCname(dnsName2);
            assertTrue(record2.isPresent());
            assertEquals("rotation-fqdn-01.", record2.get().data().asString());
        }
    }

    @Test
    // Application-scoped (weighted, cross-instance) endpoints: weights go to the config
    // server, and one regional CNAME per endpoint is created.
    void testDnsUpdatesForApplicationEndpoint() {
        ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta");
        ApplicationId main = ApplicationId.from("tenant1", "app1", "main");
        var context = tester.newDeploymentContext(beta);
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .instances("beta,main")
                .region("us-west-1")
                .region("us-east-3")
                .applicationEndpoint("a", "default", "us-west-1", Map.of(beta.instance(), 2, main.instance(), 8))
                .applicationEndpoint("b", "default", "us-west-1", Map.of(beta.instance(), 1, main.instance(), 1))
                .applicationEndpoint("c", "default", "us-east-3", Map.of(beta.instance(), 4, main.instance(), 6))
                .build();
        context.submit(applicationPackage).deploy();

        ZoneId usWest = ZoneId.from("prod", "us-west-1");
        ZoneId usEast = ZoneId.from("prod", "us-east-3");
        // Expected per-deployment endpoint weights, as declared above.
        Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of(
                new DeploymentId(beta, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 2, "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1),
                new DeploymentId(main, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 8, "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1),
                new DeploymentId(beta, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 4),
                new DeploymentId(main, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 6)
        );
        deploymentEndpoints.forEach((deployment, endpoints) -> {
            Set<ContainerEndpoint> expected = endpoints.entrySet().stream()
                    .map(kv -> new ContainerEndpoint("default", "application", List.of(kv.getKey()),
                                                     OptionalInt.of(kv.getValue()), RoutingMethod.sharedLayer4))
                    .collect(Collectors.toSet());
            assertEquals(expected, tester.configServer().containerEndpoints().get(deployment),
                         "Endpoint names for " + deployment + " are passed to config server");
        });
        context.flushDnsUpdates();

        Set<Record> records = tester.controllerTester().nameService().records();
        assertEquals(Set.of(new Record(Record.Type.CNAME, RecordName.from("a.app1.tenant1.us-west-1-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-west-1.")),
                            new Record(Record.Type.CNAME, RecordName.from("b.app1.tenant1.us-west-1-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-west-1.")),
                            new Record(Record.Type.CNAME, RecordName.from("c.app1.tenant1.us-east-3-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-east-3."))),
                     records);

        List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application())
                                              .scope(Endpoint.Scope.application)
                                              .mapToList(Endpoint::dnsName);
        assertEquals(List.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud",
                             "b.app1.tenant1.us-west-1-r.vespa.oath.cloud",
                             "c.app1.tenant1.us-east-3-r.vespa.oath.cloud"),
                     endpointDnsNames);
    }

    @Test
    // Dev deployments bypass jobs and don't persist a deployment spec; metadata is stored
    // on deploy and cleared (empty bytes) on deactivate.
    void testDevDeployment() {
        ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]);
        var context = tester.newDeploymentContext();
        ZoneId zone = ZoneId.from("dev", "us-east-1");
        tester.controllerTester().zoneRegistry()
              .setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4);

        context.runJob(zone, applicationPackage);
        assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(),
                   "Application deployed and activated");
        assertTrue(context.instanceJobs().isEmpty(), "No job status added");
        assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored");

        Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone))
                                                  .asList()
                                                  .stream()
                                                  .map(Endpoint::routingMethod)
                                                  .collect(Collectors.toSet());
        assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4));
        assertNotNull(tester.controllerTester().serviceRegistry().applicationStore()
                            .getMeta(new DeploymentId(context.instanceId(), zone))
                            .get(tester.clock().instant()));

        tester.clock().advance(Duration.ofSeconds(1));
        tester.controller().applications().deactivate(context.instanceId(), zone);
        assertArrayEquals(new byte[0],
                          tester.controllerTester().serviceRegistry().applicationStore()
                                .getMeta(new DeploymentId(context.instanceId(), zone))
                                .get(tester.clock().instant()));
    }

    @Test
    // Version-compatibility checks for dev: compile version, requested major and available
    // platforms must agree, with major 8 flagged as incompatible with 7.
    void testDevDeploymentWithIncompatibleVersions() {
        Version version1 = new Version("7");
        Version version2 = new Version("7.5");
        Version version3 = new Version("8");
        var context = tester.newDeploymentContext();
        tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
        tester.controllerTester().upgradeSystem(version2);
        // Keep 7.5 alive in the system so it remains a deployment target.
        tester.newDeploymentContext("keep", "v2", "alive").submit().deploy();
        ZoneId zone = ZoneId.from("dev", "us-east-1");

        context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build());
        assertEquals(version2, context.deployment(zone).version());
        assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());

        try {
            context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
            fail("Should fail when specifying a major that does not yet exist");
        } catch (IllegalArgumentException e) {
            assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage());
        }

        try {
            context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
            fail("Should fail when compiled against a version which is only compatible with not-yet-existent versions");
        } catch (IllegalArgumentException e) {
            assertEquals("no platforms are compatible with compile version 8", e.getMessage());
        }

        tester.controllerTester().upgradeSystem(version3);
        try {
            context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build());
            fail("Should fail when specifying a major which is incompatible with compile version");
        } catch (IllegalArgumentException e) {
            assertEquals("no platforms on major version 8 specified in deployment.xml are compatible with compile version 7", e.getMessage());
        }

        context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build());
        assertEquals(version3, context.deployment(zone).version());
        assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());

        context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build());
        assertEquals(version3, context.deployment(zone).version());
        assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion());
    }

    @Test
    // Suspension is tracked per deployment and does not leak across zones.
    void testSuspension() {
        var context = tester.newDeploymentContext();
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .region("us-west-1")
                .region("us-east-3")
                .build();
        context.submit(applicationPackage).deploy();

        DeploymentId deployment1 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1")));
        DeploymentId deployment2 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3")));
        assertFalse(tester.configServer().isSuspended(deployment1));
        assertFalse(tester.configServer().isSuspended(deployment2));
        tester.configServer().setSuspension(deployment1, true);
        assertTrue(tester.configServer().isSuspended(deployment1));
        assertFalse(tester.configServer().isSuspended(deployment2));
    }

    @Test
    // Deactivating twice must be idempotent (no exception on the second call).
    void testDeletingApplicationThatHasAlreadyBeenDeleted() {
        var context = tester.newDeploymentContext();
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .region("us-west-1")
                .build();
        ZoneId zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
        context.submit(applicationPackage).runJob(zone, applicationPackage);
        tester.controller().applications().deactivate(context.instanceId(), zone);
        tester.controller().applications().deactivate(context.instanceId(), zone);
    }

    @Test
    // Config-server warnings emitted during deploy are recorded in deployment metrics.
    void testDeployApplicationWithWarnings() {
        var context = tester.newDeploymentContext();
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .region("us-west-1")
                .build();
        ZoneId zone = ZoneId.from("prod", "us-west-1");
        int warnings = 3;
        tester.configServer().generateWarnings(context.deploymentIdIn(zone), warnings);
        context.submit(applicationPackage).deploy();
        assertEquals(warnings, context.deployment(zone)
                                      .metrics().warnings().get(DeploymentMetrics.Warning.all).intValue());
    }

    @Test
    // Certificates are provisioned with SAN entries for prod/test/staging zones, are reused
    // on redeploy, and are also provisioned for dev zones.
    void testDeploySelectivelyProvisionsCertificate() {
        Function<Instance, Optional<EndpointCertificateMetadata>> certificate =
                (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id());

        var context1 = tester.newDeploymentContext("tenant1", "app1", "default");
        var prodZone = ZoneId.from("prod", "us-west-1");
        var stagingZone = ZoneId.from("staging", "us-east-3");
        var testZone = ZoneId.from("test", "us-east-1");
        tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone));
        var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
                .region(prodZone.region())
                .build();
        context1.submit(applicationPackage).deploy();
        var cert = certificate.apply(context1.instance());
        assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod);
        // Expected SANs: the random cert name, the global names, and plain + wildcard names
        // for each of prod/test/staging (non-prod names carry the environment suffix).
        assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud",
                                             "app1.tenant1.global.vespa.oath.cloud",
                                             "*.app1.tenant1.global.vespa.oath.cloud"),
                                   Stream.of(prodZone, testZone, stagingZone)
                                         .flatMap(zone -> Stream.of("", "*.")
                                                                .map(prefix -> prefix + "app1.tenant1." + zone.region().value() +
                                                                               (zone.environment() == Environment.prod ? "" : "." + zone.environment().value()) +
                                                                               ".vespa.oath.cloud")))
                           .collect(Collectors.toUnmodifiableSet()),
                     Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId())));

        // Redeploy: the same certificate is reused, not re-provisioned.
        context1.submit(applicationPackage).deploy();
        assertEquals(cert, certificate.apply(context1.instance()));

        var context2 = tester.newDeploymentContext("tenant1", "app2", "default");
        var devZone = ZoneId.from("dev", "us-east-1");
        context2.runJob(devZone, applicationPackage);
        assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(),
                   "Application deployed and activated");
        assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer");
    }

    @Test
    // A global endpoint must not span regions in different clouds.
    void testDeployWithGlobalEndpointsInMultipleClouds() {
        tester.controllerTester().zoneRegistry().setZones(
                ZoneApiMock.fromId("test.us-west-1"),
                ZoneApiMock.fromId("staging.us-west-1"),
                ZoneApiMock.fromId("prod.us-west-1"),
                ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build()
        );
        var context = tester.newDeploymentContext();
        var applicationPackage = new ApplicationPackageBuilder()
                .region("aws-us-east-1")
                .region("us-west-1")
                .endpoint("default", "default")
                .build();
        try {
            context.submit(applicationPackage);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
        }
        var applicationPackage2 = new ApplicationPackageBuilder()
                .region("aws-us-east-1")
                .region("us-west-1")
                .endpoint("aws", "default", "aws-us-east-1")
                .endpoint("foo", "default", "aws-us-east-1", "us-west-1")
                .build();
        try {
            context.submit(applicationPackage2);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage());
        }
    }

    @Test
    // Global endpoints over GCP regions are rejected as unsupported.
    void testDeployWithGlobalEndpointsInGcp() {
        tester.controllerTester().zoneRegistry().setZones(
                ZoneApiMock.fromId("test.us-west-1"),
                ZoneApiMock.fromId("staging.us-west-1"),
                ZoneApiMock.newBuilder().with(CloudName.GCP).withId("prod.gcp-us-east1-b").build()
        );
        var context = tester.newDeploymentContext();
        var applicationPackage = new ApplicationPackageBuilder()
                .region("gcp-us-east1-b")
                .endpoint("default", "default")
                .build();
        try {
            context.submit(applicationPackage);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("Endpoint 'default' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage());
        }
        var applicationPackage2 = new ApplicationPackageBuilder()
                .region("gcp-us-east1-b")
                .endpoint("gcp", "default", "gcp-us-east1-b")
                .build();
        try {
            context.submit(applicationPackage2);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("Endpoint 'gcp' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage());
        }
    }

    @Test
    // Submission without source revision metadata still deploys.
    void testDeployWithoutSourceRevision() {
        var context = tester.newDeploymentContext();
        var applicationPackage = new ApplicationPackageBuilder()
                .upgradePolicy("default")
                .region("us-west-1")
                .build();
        context.submit(applicationPackage, Optional.empty())
               .deploy();
        assertEquals(1, context.instance().deployments().size(), "Deployed application");
    }

    @Test
    // Mixed routing methods: the exclusively-routed zone gets ALIAS/CNAME records while
    // the shared-routed zone does not.
    void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() {
        var context = tester.newDeploymentContext();
        var zone1 = ZoneId.from("prod", "us-west-1");
        var zone2 = ZoneId.from("prod", "us-east-3");
        var applicationPackage = new ApplicationPackageBuilder()
                .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"))
                .endpoint("default", "default", zone1.region().value(), zone2.region().value())
                .endpoint("east", "default", zone2.region().value())
                .region(zone1.region())
                .region(zone2.region())
                .build();
        tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4);
        tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive);
        context.submit(applicationPackage).deploy();

        var expectedRecords = List.of(
                new Record(Record.Type.ALIAS,
                           RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"),
                           new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"),
                                                   "dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()),
                new Record(Record.Type.ALIAS,
                           RecordName.from("east.application.tenant.global.vespa.oath.cloud"),
                           new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"),
                                                  "dns-zone-1", ZoneId.from("prod.us-east-3")).pack()),
                new Record(Record.Type.CNAME,
                           RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"),
                           RecordData.from("lb-0--tenant.application.default--prod.us-east-3.")));
        assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records()));
    }

    @Test
    // Direct (exclusive) routing without rotations: endpoint names per zone follow
    // endpoint membership. NOTE(review): the local 'tester' deliberately(?) shadows the
    // field, using a rotation-less config; 'main' is presumably a statically imported
    // SystemName — confirm against the file's imports.
    void testDeploymentDirectRouting() {
        DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main));
        var context = tester.newDeploymentContext();
        var zone1 = ZoneId.from("prod", "us-west-1");
        var zone2 = ZoneId.from("prod", "us-east-3");
        var zone3 = ZoneId.from("prod", "eu-west-1");
        tester.controllerTester().zoneRegistry()
              .exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3));
        var applicationPackageBuilder = new ApplicationPackageBuilder()
                .region(zone1.region())
                .region(zone2.region())
                .region(zone3.region())
                .endpoint("default", "default")
                .endpoint("foo", "qrs")
                .endpoint("us", "default", zone1.region().value(), zone2.region().value())
                .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service"));
        context.submit(applicationPackageBuilder.build()).deploy();

        // zone1/zone2 are members of all three endpoints; zone3 only of the region-less two.
        for (var zone : List.of(zone1, zone2)) {
            assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
                                "foo.application.tenant.global.vespa.oath.cloud",
                                "us.application.tenant.global.vespa.oath.cloud"),
                         tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)),
                         "Expected container endpoints in " + zone);
        }
        assertEquals(Set.of("application.tenant.global.vespa.oath.cloud",
                            "foo.application.tenant.global.vespa.oath.cloud"),
                     tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)),
                     "Expected container endpoints in " + zone3);
    }

    @Test
    // Moving an endpoint to another cluster requires the global-endpoint-change override.
    void testChangeEndpointCluster() {
        var context = tester.newDeploymentContext();
        var west = ZoneId.from("prod", "us-west-1");
        var east = ZoneId.from("prod", "us-east-3");
        var applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "foo")
                .region(west.region().value())
                .region(east.region().value())
                .build();
        context.submit(applicationPackage).deploy();
        assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId())
                                                       .rotations().get(0).clusterId());

        applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "bar")
                .region(west.region().value())
                .region(east.region().value())
                .build();
        try {
            context.submit(applicationPackage).deploy();
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " +
                         "'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " +
                         "deployment.xml. Deploying given deployment.xml will remove " +
                         "[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " +
                         "[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. To allow this add " +
                         "<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " +
                         // NOTE(review): string literal truncated in this snapshot — the URL after
                         // "https:" (and the closing quote plus the e.getMessage() comparison) was
                         // stripped by the extractor; restore from VCS. Reproduced as found:
                         "https:
        }

        applicationPackage = new ApplicationPackageBuilder()
                .endpoint("default", "bar")
                .region(west.region().value())
                .region(east.region().value())
                .allow(ValidationId.globalEndpointChange)
                .build();
        context.submit(applicationPackage).deploy();
        assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId())
                                                       .rotations().get(0).clusterId());
    }

    @Test
    // Corrupt serialized application data: readable() skips it, asList() throws, and a
    // resubmit repairs nothing for the corrupted entry. NOTE(review): the local 'tester'
    // shadows the field on purpose, to share the MockCuratorDb.
    void testReadableApplications() {
        var db = new MockCuratorDb(tester.controller().system());
        var tester = new DeploymentTester(new ControllerTester(db));
        var app1 = tester.newDeploymentContext("t1", "a1", "default")
                .submit()
                .deploy();
        var app2 = tester.newDeploymentContext("t2", "a2", "default")
                .submit()
                .deploy();
        assertEquals(2, tester.applications().readable().size());

        // Corrupt app2's stored bytes directly in the curator.
        db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()),
                         new byte[]{(byte) 0xDE, (byte) 0xAD});
        assertEquals(1, tester.applications().readable().size());
        try {
            tester.applications().asList();
            fail("Expected exception");
        } catch (Exception ignored) {
        }
        // The unaffected application can still be worked with.
        app1.submit().deploy();
    }

    @Test
    // An endpoint id equal to another instance's name is rejected (DNS names would clash).
    // NOTE(review): leading whitespace inside the XML string literals may have been
    // collapsed by the extraction — verify the exact literal against VCS.
    void testClashingEndpointIdAndInstanceName() {
        String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
                               " <instance id=\"default\">\n" +
                               " <prod>\n" +
                               " <region active=\"true\">us-west-1</region>\n" +
                               " </prod>\n" +
                               " <endpoints>\n" +
                               " <endpoint id=\"dev\" container-id=\"qrs\"/>\n" +
                               " </endpoints>\n" +
                               " </instance>\n" +
                               " <instance id=\"dev\">\n" +
                               " <prod>\n" +
                               " <region active=\"true\">us-west-1</region>\n" +
                               " </prod>\n" +
                               " <endpoints>\n" +
                               " <endpoint id=\"default\" container-id=\"qrs\"/>\n" +
                               " </endpoints>\n" +
                               " </instance>\n" +
                               "</deployment>\n";
        ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
        try {
            tester.newDeploymentContext().submit(applicationPackage);
            fail("Expected exception");
        } catch (IllegalArgumentException e) {
            assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'", e.getMessage());
        }
    }

    @Test
    // A test package with staging tests but no staging setup yields a warning notification.
    void testTestPackageWarnings() {
        String deploymentXml = "<deployment version='1.0'>\n" +
                               " <prod>\n" +
                               " <region>us-west-1</region>\n" +
                               " </prod>\n" +
                               "</deployment>\n";
        ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml);
        byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0]));
        var app = tester.newDeploymentContext();
        tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1);
        assertEquals(List.of(new Notification(tester.clock().instant(),
                                              Type.testPackage,
                                              Level.warning,
                                              NotificationSource.from(app.application().id()),
                                              List.of("test package has staging tests, so it should also include staging setup",
                                                      // NOTE(review): string literal truncated in this snapshot —
                                                      // the URL after "https:" (and the closing quote/parens) was
                                                      // stripped by the extractor; restore from VCS:
                                                      "see https:
                     tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true));
    }

    @Test
    // compileVersion honours confidence and a requested major version.
    void testCompileVersion() {
        DeploymentContext context = tester.newDeploymentContext();
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
        TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId());

        Version version0 = Version.fromString("7.1");
        tester.controllerTester().upgradeSystem(version0);
        tester.upgrader().overrideConfidence(version0, Confidence.normal);
        tester.controllerTester().computeVersionStatus();
        assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7)));
        assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty()));
        assertEquals("this system has no available versions on specified major: 8",
                     assertThrows(IllegalArgumentException.class,
                                  () -> tester.applications().compileVersion(application, OptionalInt.of(8)))
                             .getMessage());
        // NOTE(review): testCompileVersion continues beyond this chunk boundary — the rest
        // of the method (and the closing braces) lies outside the visible source.
context.submit(applicationPackage).deploy(); Version version1 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().overrideConfidence(version1, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty())); tester.upgrader().maintain(); context.deployPlatform(version1); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy(); TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app"); Version version2 = Version.fromString("8.0"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8))); assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, 
OptionalInt.of(8))); assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8))); tester.upgrader().overrideConfidence(version2, Confidence.broken); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals("no suitable, released compile version exists", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, 
OptionalInt.empty())) .getMessage()); assertEquals("no suitable, released compile version exists for specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8))); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8))); } @Test void testCloudAccount() { DeploymentContext context = tester.newDeploymentContext(); ZoneId devZone = devUsEast1.zone(); ZoneId prodZone = productionUsWest1.zone(); String cloudAccount = "012345678912"; var applicationPackage = new ApplicationPackageBuilder() .cloudAccount(cloudAccount) .region(prodZone.region()) .build(); context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'"); tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class); context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'") .abortJob(stagingTest); tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), systemTest.zone(), stagingTest.zone(), prodZone); 
context.submit(applicationPackage).deploy(); tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), devZone); context.runJob(devZone, applicationPackage); for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) { assertEquals(cloudAccount, tester.controllerTester().configServer() .cloudAccount(context.deploymentIdIn(zoneId)) .get().value()); } } @Test void testSubmitWithElementDeprecatedOnPreviousMajor() { DeploymentContext context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .compileVersion(Version.fromString("8.1")) .region("us-west-1") .globalServiceId("qrs") .build(); try { context.submit(applicationPackage).deploy(); fail("Expected exception"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7")); } } }
class ControllerTest { private final DeploymentTester tester = new DeploymentTester(); @Test @Test void testGlobalRotationStatus() { var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() .region(zone1.region()) .region(zone2.region()) .endpoint("default", "default", zone1.region().value(), zone2.region().value()) .build(); context.submit(applicationPackage).deploy(); var deployment1 = context.deploymentIdIn(zone1); DeploymentRoutingContext routingContext = tester.controller().routing().of(deployment1); RoutingStatus status1 = routingContext.routingStatus(); assertEquals(RoutingStatus.Value.in, status1.value()); routingContext.setRoutingStatus(RoutingStatus.Value.out, RoutingStatus.Agent.operator); RoutingStatus status2 = routingContext.routingStatus(); assertEquals(RoutingStatus.Value.out, status2.value()); RoutingStatus status3 = tester.controller().routing().of(context.deploymentIdIn(zone2)).routingStatus(); assertEquals(RoutingStatus.Value.in, status3.value()); } @Test void testDnsUpdatesForGlobalEndpoint() { var betaContext = tester.newDeploymentContext("tenant1", "app1", "beta"); var defaultContext = tester.newDeploymentContext("tenant1", "app1", "default"); ZoneId usWest = ZoneId.from("prod.us-west-1"); ZoneId usCentral = ZoneId.from("prod.us-central-1"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("beta,default") .endpoint("default", "foo") .region(usWest.region()) .region(usCentral.region()) .build(); tester.controllerTester().zoneRegistry().setRoutingMethod(List.of(ZoneApiMock.from(usWest), ZoneApiMock.from(usCentral)), RoutingMethod.sharedLayer4); betaContext.submit(applicationPackage).deploy(); { Collection<Deployment> betaDeployments = betaContext.instance().deployments().values(); assertFalse(betaDeployments.isEmpty()); Set<ContainerEndpoint> containerEndpoints = Set.of(new 
ContainerEndpoint("foo", "global", List.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-id-01"), OptionalInt.empty(), RoutingMethod.sharedLayer4)); for (Deployment deployment : betaDeployments) { assertEquals(containerEndpoints, tester.configServer().containerEndpoints() .get(betaContext.deploymentIdIn(deployment.zone()))); } betaContext.flushDnsUpdates(); } { Collection<Deployment> defaultDeployments = defaultContext.instance().deployments().values(); assertFalse(defaultDeployments.isEmpty()); Set<ContainerEndpoint> containerEndpoints = Set.of(new ContainerEndpoint("foo", "global", List.of("app1.tenant1.global.vespa.oath.cloud", "rotation-id-02"), OptionalInt.empty(), RoutingMethod.sharedLayer4)); for (Deployment deployment : defaultDeployments) { assertEquals(containerEndpoints, tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone()))); } defaultContext.flushDnsUpdates(); } Map<String, String> rotationCnames = Map.of("beta.app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-01.", "app1.tenant1.global.vespa.oath.cloud", "rotation-fqdn-02."); rotationCnames.forEach((cname, data) -> { var record = tester.controllerTester().findCname(cname); assertTrue(record.isPresent()); assertEquals(cname, record.get().name().asString()); assertEquals(data, record.get().data().asString()); }); Map<ApplicationId, Set<String>> globalDnsNamesByInstance = Map.of(betaContext.instanceId(), Set.of("beta.app1.tenant1.global.vespa.oath.cloud"), defaultContext.instanceId(), Set.of("app1.tenant1.global.vespa.oath.cloud")); globalDnsNamesByInstance.forEach((instance, dnsNames) -> { Set<String> actualDnsNames = tester.controller().routing().readDeclaredEndpointsOf(instance) .scope(Endpoint.Scope.global) .asList().stream() .map(Endpoint::dnsName) .collect(Collectors.toSet()); assertEquals(dnsNames, actualDnsNames, "Global DNS names for " + instance); }); } @Test void testDnsUpdatesForGlobalEndpointLegacySyntax() { var context = 
tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .globalServiceId("foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); Collection<Deployment> deployments = context.instance().deployments().values(); assertFalse(deployments.isEmpty()); for (Deployment deployment : deployments) { assertEquals(Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())), "Rotation names are passed to config server in " + deployment.zone()); } context.flushDnsUpdates(); assertEquals(1, tester.controllerTester().nameService().records().size()); Optional<Record> record = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud"); assertTrue(record.isPresent()); assertEquals("app1.tenant1.global.vespa.oath.cloud", record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); List<String> globalDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.instanceId()) .scope(Endpoint.Scope.global) .sortedBy(Comparator.comparing(Endpoint::dnsName)) .mapToList(Endpoint::dnsName); assertEquals(List.of("app1.tenant1.global.vespa.oath.cloud"), globalDnsNames); } @Test void testDnsUpdatesForMultipleGlobalEndpoints() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("foobar", "qrs", "us-west-1", "us-central-1") .endpoint("default", "qrs", "us-west-1", "us-central-1") .endpoint("all", "qrs") .endpoint("west", "qrs", "us-west-1") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); Collection<Deployment> deployments = context.instance().deployments().values(); assertFalse(deployments.isEmpty()); var notWest = Set.of( "rotation-id-01", 
"foobar.app1.tenant1.global.vespa.oath.cloud", "rotation-id-02", "app1.tenant1.global.vespa.oath.cloud", "rotation-id-03", "all.app1.tenant1.global.vespa.oath.cloud" ); var west = Sets.union(notWest, Set.of("rotation-id-04", "west.app1.tenant1.global.vespa.oath.cloud")); for (Deployment deployment : deployments) { assertEquals(ZoneId.from("prod.us-west-1").equals(deployment.zone()) ? west : notWest, tester.configServer().containerEndpointNames(context.deploymentIdIn(deployment.zone())), "Rotation names are passed to config server in " + deployment.zone()); } context.flushDnsUpdates(); assertEquals(4, tester.controllerTester().nameService().records().size()); var record1 = tester.controllerTester().findCname("app1.tenant1.global.vespa.oath.cloud"); assertTrue(record1.isPresent()); assertEquals("app1.tenant1.global.vespa.oath.cloud", record1.get().name().asString()); assertEquals("rotation-fqdn-02.", record1.get().data().asString()); var record2 = tester.controllerTester().findCname("foobar.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record2.isPresent()); assertEquals("foobar.app1.tenant1.global.vespa.oath.cloud", record2.get().name().asString()); assertEquals("rotation-fqdn-01.", record2.get().data().asString()); var record3 = tester.controllerTester().findCname("all.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record3.isPresent()); assertEquals("all.app1.tenant1.global.vespa.oath.cloud", record3.get().name().asString()); assertEquals("rotation-fqdn-03.", record3.get().data().asString()); var record4 = tester.controllerTester().findCname("west.app1.tenant1.global.vespa.oath.cloud"); assertTrue(record4.isPresent()); assertEquals("west.app1.tenant1.global.vespa.oath.cloud", record4.get().name().asString()); assertEquals("rotation-fqdn-04.", record4.get().data().asString()); } @Test void testDnsUpdatesForGlobalEndpointChanges() { var context = tester.newDeploymentContext("tenant1", "app1", "default"); var west = ZoneId.from("prod", "us-west-1"); var 
central = ZoneId.from("prod", "us-central-1"); var east = ZoneId.from("prod", "us-east-3"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage).deploy(); for (var zone : List.of(west, central)) { assertEquals( Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage2).deploy(); for (var zone : List.of(west, central)) { assertEquals( Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } assertEquals( Set.of("rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(east)) , "Zone " + east + " is a member of global endpoint"); ApplicationPackage applicationPackage3 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value(), east.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage3).deploy(); for (var zone : List.of(west, central, east)) { assertEquals( zone.equals(east) ? 
Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud", "rotation-id-02", "east.app1.tenant1.global.vespa.oath.cloud") : Set.of("rotation-id-01", "app1.tenant1.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)) , "Zone " + zone + " is a member of global endpoint"); } ApplicationPackage applicationPackage4 = new ApplicationPackageBuilder() .endpoint("default", "qrs", west.region().value(), central.region().value()) .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage4); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " + "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " + "but does not include all of these in deployment.xml. Deploying given deployment.xml " + "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1] " + "and add [endpoint 'default' (cluster qrs) -> us-central-1, us-west-1]. " + ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage()); } ApplicationPackage applicationPackage5 = new ApplicationPackageBuilder() .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage5); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant1.app1' has endpoints " + "[endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1, endpoint 'east' (cluster qrs) -> us-east-3], " + "but does not include all of these in deployment.xml. 
Deploying given deployment.xml " + "will remove [endpoint 'default' (cluster qrs) -> us-central-1, us-east-3, us-west-1]. " + ValidationOverrides.toAllowMessage(ValidationId.globalEndpointChange), e.getMessage()); } ApplicationPackage applicationPackage6 = new ApplicationPackageBuilder() .endpoint("east", "qrs", east.region().value()) .region(west.region().value()) .region(central.region().value()) .region(east.region().value()) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage6); } @Test void testUnassignRotations() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "qrs", "us-west-1", "us-central-1") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); ApplicationPackage applicationPackage2 = new ApplicationPackageBuilder() .region("us-west-1") .region("us-central-1") .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage2).deploy(); assertEquals(List.of(), context.instance().rotations()); assertEquals( Set.of(), tester.configServer().containerEndpoints().get(context.deploymentIdIn(ZoneId.from("prod", "us-west-1"))) ); } @Test void testDnsUpdatesWithChangeInRotationAssignment() { String dnsName1 = "app1.tenant1.global.vespa.oath.cloud"; { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals(1, tester.controllerTester().nameService().records().size()); { Optional<Record> record = tester.controllerTester().findCname(dnsName1); assertTrue(record.isPresent()); assertEquals(dnsName1, record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); } applicationPackage = new ApplicationPackageBuilder() 
.allow(ValidationId.deploymentRemoval) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage); tester.applications().deleteApplication(context.application().id(), tester.controllerTester().credentialsFor(context.application().id().tenant())); try (RotationLock lock = tester.controller().routing().rotations().lock()) { assertTrue(tester.controller().routing().rotations().availableRotations(lock) .containsKey(new RotationId("rotation-id-01")), "Rotation is unassigned"); } context.flushDnsUpdates(); Optional<Record> record = tester.controllerTester().findCname(dnsName1); assertTrue(record.isEmpty(), dnsName1 + " is removed"); } String dnsName2 = "app2.tenant2.global.vespa.oath.cloud"; { var context = tester.newDeploymentContext("tenant2", "app2", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals(1, tester.controllerTester().nameService().records().size()); var record = tester.controllerTester().findCname(dnsName2); assertTrue(record.isPresent()); assertEquals(dnsName2, record.get().name().asString()); assertEquals("rotation-fqdn-01.", record.get().data().asString()); } { var context = tester.newDeploymentContext("tenant1", "app1", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region("us-west-1") .region("us-central-1") .build(); context.submit(applicationPackage).deploy(); assertEquals("rotation-id-02", context.instance().rotations().get(0).rotationId().asString()); assertEquals(2, tester.controllerTester().nameService().records().size()); var record1 = tester.controllerTester().findCname(dnsName1); assertTrue(record1.isPresent()); assertEquals("rotation-fqdn-02.", record1.get().data().asString()); var record2 = tester.controllerTester().findCname(dnsName2); assertTrue(record2.isPresent()); 
assertEquals("rotation-fqdn-01.", record2.get().data().asString()); } } @Test void testDnsUpdatesForApplicationEndpoint() { ApplicationId beta = ApplicationId.from("tenant1", "app1", "beta"); ApplicationId main = ApplicationId.from("tenant1", "app1", "main"); var context = tester.newDeploymentContext(beta); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("beta,main") .region("us-west-1") .region("us-east-3") .applicationEndpoint("a", "default", "us-west-1", Map.of(beta.instance(), 2, main.instance(), 8)) .applicationEndpoint("b", "default", "us-west-1", Map.of(beta.instance(), 1, main.instance(), 1)) .applicationEndpoint("c", "default", "us-east-3", Map.of(beta.instance(), 4, main.instance(), 6)) .build(); context.submit(applicationPackage).deploy(); ZoneId usWest = ZoneId.from("prod", "us-west-1"); ZoneId usEast = ZoneId.from("prod", "us-east-3"); Map<DeploymentId, Map<String, Integer>> deploymentEndpoints = Map.of( new DeploymentId(beta, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 2, "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1), new DeploymentId(main, usWest), Map.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", 8, "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", 1), new DeploymentId(beta, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 4), new DeploymentId(main, usEast), Map.of("c.app1.tenant1.us-east-3-r.vespa.oath.cloud", 6) ); deploymentEndpoints.forEach((deployment, endpoints) -> { Set<ContainerEndpoint> expected = endpoints.entrySet().stream() .map(kv -> new ContainerEndpoint("default", "application", List.of(kv.getKey()), OptionalInt.of(kv.getValue()), RoutingMethod.sharedLayer4)) .collect(Collectors.toSet()); assertEquals(expected, tester.configServer().containerEndpoints().get(deployment), "Endpoint names for " + deployment + " are passed to config server"); }); context.flushDnsUpdates(); Set<Record> records = tester.controllerTester().nameService().records(); 
assertEquals(Set.of(new Record(Record.Type.CNAME, RecordName.from("a.app1.tenant1.us-west-1-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-west-1.")), new Record(Record.Type.CNAME, RecordName.from("b.app1.tenant1.us-west-1-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-west-1.")), new Record(Record.Type.CNAME, RecordName.from("c.app1.tenant1.us-east-3-r.vespa.oath.cloud"), RecordData.from("vip.prod.us-east-3."))), records); List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application()) .scope(Endpoint.Scope.application) .mapToList(Endpoint::dnsName); assertEquals(List.of("a.app1.tenant1.us-west-1-r.vespa.oath.cloud", "b.app1.tenant1.us-west-1-r.vespa.oath.cloud", "c.app1.tenant1.us-east-3-r.vespa.oath.cloud"), endpointDnsNames); } @Test void testDevDeployment() { ApplicationPackage applicationPackage = new ApplicationPackage(new byte[0]); var context = tester.newDeploymentContext(); ZoneId zone = ZoneId.from("dev", "us-east-1"); tester.controllerTester().zoneRegistry() .setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.sharedLayer4); context.runJob(zone, applicationPackage); assertTrue(tester.configServer().application(context.instanceId(), zone).get().activated(), "Application deployed and activated"); assertTrue(context.instanceJobs().isEmpty(), "No job status added"); assertEquals(DeploymentSpec.empty, context.application().deploymentSpec(), "DeploymentSpec is not stored"); Set<RoutingMethod> routingMethods = tester.controller().routing().readEndpointsOf(context.deploymentIdIn(zone)) .asList() .stream() .map(Endpoint::routingMethod) .collect(Collectors.toSet()); assertEquals(routingMethods, Set.of(RoutingMethod.sharedLayer4)); assertNotNull(tester.controllerTester().serviceRegistry().applicationStore() .getMeta(new DeploymentId(context.instanceId(), zone)) .get(tester.clock().instant())); tester.clock().advance(Duration.ofSeconds(1)); tester.controller().applications().deactivate(context.instanceId(), zone); 
assertArrayEquals(new byte[0], tester.controllerTester().serviceRegistry().applicationStore() .getMeta(new DeploymentId(context.instanceId(), zone)) .get(tester.clock().instant())); } @Test void testDevDeploymentWithIncompatibleVersions() { Version version1 = new Version("7"); Version version2 = new Version("7.5"); Version version3 = new Version("8"); var context = tester.newDeploymentContext(); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class); tester.controllerTester().upgradeSystem(version2); tester.newDeploymentContext("keep", "v2", "alive").submit().deploy(); ZoneId zone = ZoneId.from("dev", "us-east-1"); context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).build()); assertEquals(version2, context.deployment(zone).version()); assertEquals(Optional.of(version1), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build()); fail("Should fail when specifying a major that does not yet exist"); } catch (IllegalArgumentException e) { assertEquals("no platforms were found for major version 8 specified in deployment.xml", e.getMessage()); } try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build()); fail("Should fail when compiled against a version which is only compatible with not-yet-existent versions"); } catch (IllegalArgumentException e) { assertEquals("no platforms are compatible with compile version 8", e.getMessage()); } tester.controllerTester().upgradeSystem(version3); try { context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version1).majorVersion(8).build()); fail("Should fail when specifying a major which is incompatible with compile version"); } catch (IllegalArgumentException e) { assertEquals("no platforms on major version 8 specified in deployment.xml are compatible 
with compile version 7", e.getMessage()); } context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).majorVersion(8).build()); assertEquals(version3, context.deployment(zone).version()); assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); context.runJob(zone, new ApplicationPackageBuilder().compileVersion(version3).build()); assertEquals(version3, context.deployment(zone).version()); assertEquals(Optional.of(version3), context.application().revisions().get(context.deployment(zone).revision()).compileVersion()); } @Test void testSuspension() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); context.submit(applicationPackage).deploy(); DeploymentId deployment1 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-west-1"))); DeploymentId deployment2 = context.deploymentIdIn(ZoneId.from(Environment.prod, RegionName.from("us-east-3"))); assertFalse(tester.configServer().isSuspended(deployment1)); assertFalse(tester.configServer().isSuspended(deployment2)); tester.configServer().setSuspension(deployment1, true); assertTrue(tester.configServer().isSuspended(deployment1)); assertFalse(tester.configServer().isSuspended(deployment2)); } @Test void testDeletingApplicationThatHasAlreadyBeenDeleted() { var context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); ZoneId zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1")); context.submit(applicationPackage).runJob(zone, applicationPackage); tester.controller().applications().deactivate(context.instanceId(), zone); tester.controller().applications().deactivate(context.instanceId(), zone); } @Test void testDeployApplicationWithWarnings() { var context = tester.newDeploymentContext(); 
ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); ZoneId zone = ZoneId.from("prod", "us-west-1"); int warnings = 3; tester.configServer().generateWarnings(context.deploymentIdIn(zone), warnings); context.submit(applicationPackage).deploy(); assertEquals(warnings, context.deployment(zone) .metrics().warnings().get(DeploymentMetrics.Warning.all).intValue()); } @Test void testDeploySelectivelyProvisionsCertificate() { Function<Instance, Optional<EndpointCertificateMetadata>> certificate = (application) -> tester.controller().curator().readEndpointCertificateMetadata(application.id()); var context1 = tester.newDeploymentContext("tenant1", "app1", "default"); var prodZone = ZoneId.from("prod", "us-west-1"); var stagingZone = ZoneId.from("staging", "us-east-3"); var testZone = ZoneId.from("test", "us-east-1"); tester.controllerTester().zoneRegistry().exclusiveRoutingIn(ZoneApiMock.from(prodZone)); var applicationPackage = new ApplicationPackageBuilder().athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")) .region(prodZone.region()) .build(); context1.submit(applicationPackage).deploy(); var cert = certificate.apply(context1.instance()); assertTrue(cert.isPresent(), "Provisions certificate in " + Environment.prod); assertEquals(Stream.concat(Stream.of("vznqtz7a5ygwjkbhhj7ymxvlrekgt4l6g.vespa.oath.cloud", "app1.tenant1.global.vespa.oath.cloud", "*.app1.tenant1.global.vespa.oath.cloud"), Stream.of(prodZone, testZone, stagingZone) .flatMap(zone -> Stream.of("", "*.") .map(prefix -> prefix + "app1.tenant1." + zone.region().value() + (zone.environment() == Environment.prod ? "" : "." 
+ zone.environment().value()) + ".vespa.oath.cloud"))) .collect(Collectors.toUnmodifiableSet()), Set.copyOf(tester.controllerTester().serviceRegistry().endpointCertificateMock().dnsNamesOf(context1.instanceId()))); context1.submit(applicationPackage).deploy(); assertEquals(cert, certificate.apply(context1.instance())); var context2 = tester.newDeploymentContext("tenant1", "app2", "default"); var devZone = ZoneId.from("dev", "us-east-1"); context2.runJob(devZone, applicationPackage); assertTrue(tester.configServer().application(context2.instanceId(), devZone).get().activated(), "Application deployed and activated"); assertTrue(certificate.apply(context2.instance()).isPresent(), "Provisions certificate also in zone with routing layer"); } @Test void testDeployWithGlobalEndpointsInMultipleClouds() { tester.controllerTester().zoneRegistry().setZones( ZoneApiMock.fromId("test.us-west-1"), ZoneApiMock.fromId("staging.us-west-1"), ZoneApiMock.fromId("prod.us-west-1"), ZoneApiMock.newBuilder().with(CloudName.AWS).withId("prod.aws-us-east-1").build() ); var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .region("aws-us-east-1") .region("us-west-1") .endpoint("default", "default") .build(); try { context.submit(applicationPackage); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'default' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage()); } var applicationPackage2 = new ApplicationPackageBuilder() .region("aws-us-east-1") .region("us-west-1") .endpoint("aws", "default", "aws-us-east-1") .endpoint("foo", "default", "aws-us-east-1", "us-west-1") .build(); try { context.submit(applicationPackage2); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'foo' in instance 'default' cannot contain regions in different clouds: [aws-us-east-1, us-west-1]", e.getMessage()); } } @Test void 
testDeployWithGlobalEndpointsInGcp() { tester.controllerTester().zoneRegistry().setZones( ZoneApiMock.fromId("test.us-west-1"), ZoneApiMock.fromId("staging.us-west-1"), ZoneApiMock.newBuilder().with(CloudName.GCP).withId("prod.gcp-us-east1-b").build() ); var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .region("gcp-us-east1-b") .endpoint("default", "default") .build(); try { context.submit(applicationPackage); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'default' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage()); } var applicationPackage2 = new ApplicationPackageBuilder() .region("gcp-us-east1-b") .endpoint("gcp", "default", "gcp-us-east1-b") .build(); try { context.submit(applicationPackage2); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint 'gcp' in instance 'default' contains a Google Cloud region (gcp-us-east1-b), which is not yet supported", e.getMessage()); } } @Test void testDeployWithoutSourceRevision() { var context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .region("us-west-1") .build(); context.submit(applicationPackage, Optional.empty()) .deploy(); assertEquals(1, context.instance().deployments().size(), "Deployed application"); } @Test void testDeployWithGlobalEndpointsAndMultipleRoutingMethods() { var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")) .endpoint("default", "default", zone1.region().value(), zone2.region().value()) .endpoint("east", "default", zone2.region().value()) .region(zone1.region()) .region(zone2.region()) .build(); 
tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone1), RoutingMethod.sharedLayer4); tester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone2), RoutingMethod.exclusive); context.submit(applicationPackage).deploy(); var expectedRecords = List.of( new Record(Record.Type.ALIAS, RecordName.from("application.tenant.us-east-3-w.vespa.oath.cloud"), new WeightedAliasTarget(HostName.of("lb-0--tenant.application.default--prod.us-east-3"), "dns-zone-1", ZoneId.from("prod.us-east-3"), 1).pack()), new Record(Record.Type.ALIAS, RecordName.from("east.application.tenant.global.vespa.oath.cloud"), new LatencyAliasTarget(HostName.of("application.tenant.us-east-3-w.vespa.oath.cloud"), "dns-zone-1", ZoneId.from("prod.us-east-3")).pack()), new Record(Record.Type.CNAME, RecordName.from("application.tenant.us-east-3.vespa.oath.cloud"), RecordData.from("lb-0--tenant.application.default--prod.us-east-3."))); assertEquals(expectedRecords, List.copyOf(tester.controllerTester().nameService().records())); } @Test void testDeploymentDirectRouting() { DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main)); var context = tester.newDeploymentContext(); var zone1 = ZoneId.from("prod", "us-west-1"); var zone2 = ZoneId.from("prod", "us-east-3"); var zone3 = ZoneId.from("prod", "eu-west-1"); tester.controllerTester().zoneRegistry() .exclusiveRoutingIn(ZoneApiMock.from(zone1), ZoneApiMock.from(zone2), ZoneApiMock.from(zone3)); var applicationPackageBuilder = new ApplicationPackageBuilder() .region(zone1.region()) .region(zone2.region()) .region(zone3.region()) .endpoint("default", "default") .endpoint("foo", "qrs") .endpoint("us", "default", zone1.region().value(), zone2.region().value()) .athenzIdentity(AthenzDomain.from("domain"), AthenzService.from("service")); context.submit(applicationPackageBuilder.build()).deploy(); for (var zone : List.of(zone1, zone2)) { 
assertEquals(Set.of("application.tenant.global.vespa.oath.cloud", "foo.application.tenant.global.vespa.oath.cloud", "us.application.tenant.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone)), "Expected container endpoints in " + zone); } assertEquals(Set.of("application.tenant.global.vespa.oath.cloud", "foo.application.tenant.global.vespa.oath.cloud"), tester.configServer().containerEndpointNames(context.deploymentIdIn(zone3)), "Expected container endpoints in " + zone3); } @Test void testChangeEndpointCluster() { var context = tester.newDeploymentContext(); var west = ZoneId.from("prod", "us-west-1"); var east = ZoneId.from("prod", "us-east-3"); var applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "foo") .region(west.region().value()) .region(east.region().value()) .build(); context.submit(applicationPackage).deploy(); assertEquals(ClusterSpec.Id.from("foo"), tester.applications().requireInstance(context.instanceId()) .rotations().get(0).clusterId()); applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "bar") .region(west.region().value()) .region(east.region().value()) .build(); try { context.submit(applicationPackage).deploy(); fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("global-endpoint-change: application 'tenant.application' has endpoints [endpoint " + "'default' (cluster foo) -> us-east-3, us-west-1], but does not include all of these in " + "deployment.xml. Deploying given deployment.xml will remove " + "[endpoint 'default' (cluster foo) -> us-east-3, us-west-1] and add " + "[endpoint 'default' (cluster bar) -> us-east-3, us-west-1]. 
To allow this add " + "<allow until='yyyy-mm-dd'>global-endpoint-change</allow> to validation-overrides.xml, see " + "https: } applicationPackage = new ApplicationPackageBuilder() .endpoint("default", "bar") .region(west.region().value()) .region(east.region().value()) .allow(ValidationId.globalEndpointChange) .build(); context.submit(applicationPackage).deploy(); assertEquals(ClusterSpec.Id.from("bar"), tester.applications().requireInstance(context.instanceId()) .rotations().get(0).clusterId()); } @Test void testReadableApplications() { var db = new MockCuratorDb(tester.controller().system()); var tester = new DeploymentTester(new ControllerTester(db)); var app1 = tester.newDeploymentContext("t1", "a1", "default") .submit() .deploy(); var app2 = tester.newDeploymentContext("t2", "a2", "default") .submit() .deploy(); assertEquals(2, tester.applications().readable().size()); db.curator().set(Path.fromString("/controller/v1/applications/" + app2.application().id().serialized()), new byte[]{(byte) 0xDE, (byte) 0xAD}); assertEquals(1, tester.applications().readable().size()); try { tester.applications().asList(); fail("Expected exception"); } catch (Exception ignored) { } app1.submit().deploy(); } @Test void testClashingEndpointIdAndInstanceName() { String deploymentXml = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <instance id=\"default\">\n" + " <prod>\n" + " <region active=\"true\">us-west-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id=\"dev\" container-id=\"qrs\"/>\n" + " </endpoints>\n" + " </instance>\n" + " <instance id=\"dev\">\n" + " <prod>\n" + " <region active=\"true\">us-west-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id=\"default\" container-id=\"qrs\"/>\n" + " </endpoints>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml); try { tester.newDeploymentContext().submit(applicationPackage); 
fail("Expected exception"); } catch (IllegalArgumentException e) { assertEquals("Endpoint with ID 'default' in instance 'dev' clashes with endpoint 'dev' in instance 'default'", e.getMessage()); } } @Test void testTestPackageWarnings() { String deploymentXml = "<deployment version='1.0'>\n" + " <prod>\n" + " <region>us-west-1</region>\n" + " </prod>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentXml); byte[] testPackage = ApplicationPackage.filesZip(Map.of("tests/staging-test/foo.json", new byte[0])); var app = tester.newDeploymentContext(); tester.jobs().submit(app.application().id(), Submission.basic(applicationPackage, testPackage), 1); assertEquals(List.of(new Notification(tester.clock().instant(), Type.testPackage, Level.warning, NotificationSource.from(app.application().id()), List.of("test package has staging tests, so it should also include staging setup", "see https: tester.controller().notificationsDb().listNotifications(NotificationSource.from(app.application().id()), true)); } @Test void testCompileVersion() { DeploymentContext context = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build(); TenantAndApplicationId application = TenantAndApplicationId.from(context.instanceId()); Version version0 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version0); tester.upgrader().overrideConfidence(version0, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); 
context.submit(applicationPackage).deploy(); Version version1 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().overrideConfidence(version1, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version0, tester.applications().compileVersion(application, OptionalInt.empty())); tester.upgrader().maintain(); context.deployPlatform(version1); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); DeploymentContext legacyApp = tester.newDeploymentContext("avoid", "gc", "default").submit().deploy(); TenantAndApplicationId newApp = TenantAndApplicationId.from("new", "app"); Version version2 = Version.fromString("8.0"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8))); assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, 
OptionalInt.of(8))); assertEquals(version2, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals("this system has no available versions on specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(newApp, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.normal); tester.controllerTester().computeVersionStatus(); context.submit(new ApplicationPackageBuilder().region("us-west-1").compileVersion(version2).build()).deploy(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8))); tester.upgrader().overrideConfidence(version2, Confidence.broken); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals("no suitable, released compile version exists", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, 
OptionalInt.empty())) .getMessage()); assertEquals("no suitable, released compile version exists for specified major: 8", assertThrows(IllegalArgumentException.class, () -> tester.applications().compileVersion(application, OptionalInt.of(8))) .getMessage()); tester.controllerTester().flagSource().withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of(), String.class); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(8))); tester.upgrader().overrideConfidence(version2, Confidence.low); tester.controllerTester().computeVersionStatus(); assertEquals(version1, tester.applications().compileVersion(application, OptionalInt.of(7))); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.empty())); assertEquals(version2, tester.applications().compileVersion(application, OptionalInt.of(8))); } @Test void testCloudAccount() { DeploymentContext context = tester.newDeploymentContext(); ZoneId devZone = devUsEast1.zone(); ZoneId prodZone = productionUsWest1.zone(); String cloudAccount = "012345678912"; var applicationPackage = new ApplicationPackageBuilder() .cloudAccount(cloudAccount) .region(prodZone.region()) .build(); context.submit(applicationPackage).runJobExpectingFailure(systemTest, "Requested cloud account '012345678912' is not valid for tenant 'tenant'"); tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount), String.class); context.runJobExpectingFailure(systemTest, "Zone test.us-east-1 is not configured in requested cloud account '012345678912'") .abortJob(stagingTest); tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), systemTest.zone(), stagingTest.zone(), prodZone); 
context.submit(applicationPackage).deploy(); tester.controllerTester().zoneRegistry().configureCloudAccount(new CloudAccount(cloudAccount), devZone); context.runJob(devZone, applicationPackage); for (var zoneId : List.of(systemTest.zone(), stagingTest.zone(), devZone, prodZone)) { assertEquals(cloudAccount, tester.controllerTester().configServer() .cloudAccount(context.deploymentIdIn(zoneId)) .get().value()); } } @Test void testSubmitWithElementDeprecatedOnPreviousMajor() { DeploymentContext context = tester.newDeploymentContext(); var applicationPackage = new ApplicationPackageBuilder() .compileVersion(Version.fromString("8.1")) .region("us-west-1") .globalServiceId("qrs") .build(); try { context.submit(applicationPackage).deploy(); fail("Expected exception"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Element 'prod' contains attribute 'global-service-id' deprecated since major version 7")); } } }
This causes a + b + c + d to become an unbalanced tree with depth == number of operators — (((a + b) + c) + d) — which is not very stack-friendly. I suggest that you put all children with the same precedence in a single arithmetic node. If we require that, precedence handling should become simpler too.
/**
 * Rewrites a flat arithmetic node (children plus infix operators) into a precedence-correct
 * binary tree, delegating to popStack to combine adjacent children (where AND/OR become
 * short-circuiting if-nodes).
 *
 * Uses an operator-precedence (shunting-yard style) stack reduction: equal-precedence
 * operators associate left-to-right, since we reduce while the incoming operator does
 * not bind strictly tighter than the stack top.
 *
 * @param node the flat arithmetic node to restructure
 * @return the root of the restructured expression tree
 */
private ExpressionNode transformBooleanArithmetics(ArithmeticNode node) {
    Iterator<ExpressionNode> child = node.children().iterator();
    Deque<ChildNode> stack = new ArrayDeque<>();
    // Sentinel operator: the first child is never combined as a right-hand side
    // (popStack only fires while size > 1), so OR here is inert.
    stack.push(new ChildNode(ArithmeticOperator.OR, child.next()));
    for (Iterator<ArithmeticOperator> it = node.operators().iterator(); it.hasNext() && child.hasNext();) {
        ArithmeticOperator op = it.next();
        // The stack always holds at least the sentinel entry here (the former
        // !stack.isEmpty() guard was dead code), so reduce directly: combine while
        // the incoming operator does not bind tighter than the stack top.
        while (stack.size() > 1 && ! op.hasPrecedenceOver(stack.peek().op)) {
            popStack(stack);
        }
        stack.push(new ChildNode(op, child.next()));
    }
    // Fold whatever remains into a single root.
    while (stack.size() > 1)
        popStack(stack);
    return stack.getFirst().child;
}
stack.push(new ChildNode(ArithmeticOperator.OR, child.next()));
/**
 * Rewrites a flat arithmetic node (children plus infix operators) into a precedence-correct
 * binary tree, delegating to popStack to combine adjacent children (where AND/OR become
 * short-circuiting if-nodes).
 *
 * Uses an operator-precedence (shunting-yard style) stack reduction: equal-precedence
 * operators associate left-to-right, since we reduce while the incoming operator does
 * not bind strictly tighter than the stack top.
 *
 * @param node the flat arithmetic node to restructure
 * @return the root of the restructured expression tree
 */
private ExpressionNode transformBooleanArithmetics(ArithmeticNode node) {
    Iterator<ExpressionNode> child = node.children().iterator();
    Deque<ChildNode> stack = new ArrayDeque<>();
    // Sentinel operator: the first child is never combined as a right-hand side
    // (popStack only fires while size > 1), so OR here is inert.
    stack.push(new ChildNode(ArithmeticOperator.OR, child.next()));
    for (Iterator<ArithmeticOperator> it = node.operators().iterator(); it.hasNext() && child.hasNext();) {
        ArithmeticOperator op = it.next();
        // The stack always holds at least the sentinel entry here (the former
        // !stack.isEmpty() guard was dead code), so reduce directly: combine while
        // the incoming operator does not bind tighter than the stack top.
        while (stack.size() > 1 && ! op.hasPrecedenceOver(stack.peek().op)) {
            popStack(stack);
        }
        stack.push(new ChildNode(op, child.next()));
    }
    // Fold whatever remains into a single root.
    while (stack.size() > 1)
        popStack(stack);
    return stack.getFirst().child;
}
/**
 * Transforms boolean operators in ranking expressions into equivalent short-circuiting
 * if-nodes: a && b becomes if(a) b else false, and a || b becomes if(a) true else b.
 * NOTE(review): transform calls transformBooleanArithmetics, which is defined alongside
 * this class but not shown within it — confirm it lives in the same compilation unit.
 */
class BooleanExpressionTransformer extends ExpressionTransformer<TransformContext> {

    @Override
    public ExpressionNode transform(ExpressionNode node, TransformContext context) {
        // Bottom-up: rewrite children first, then this node if it is an arithmetic node.
        if (node instanceof CompositeNode composite)
            node = transformChildren(composite, context);
        if (node instanceof ArithmeticNode arithmetic)
            node = transformBooleanArithmetics(arithmetic);
        return node;
    }

    /**
     * Combines the top two stack entries into one. The right-hand side's operator decides
     * the combination: AND and OR become if-nodes (short-circuit semantics), any other
     * operator becomes a plain two-child arithmetic node. The combined node is written
     * back into the new top entry's child via peek(), so the stack shrinks by exactly one.
     */
    private void popStack(Deque<ChildNode> stack) {
        ChildNode rhs = stack.pop();
        ChildNode lhs = stack.peek();
        ExpressionNode combination;
        if (rhs.op == ArithmeticOperator.AND) combination = andByIfNode(lhs.child, rhs.child);
        else if (rhs.op == ArithmeticOperator.OR) combination = orByIfNode(lhs.child, rhs.child);
        else combination = new ArithmeticNode(List.of(lhs.child, rhs.child), List.of(rhs.op));
        lhs.child = combination;
    }

    /** a && b rewritten as if (a) b else false. */
    private IfNode andByIfNode(ExpressionNode a, ExpressionNode b) {
        return new IfNode(a, b, new ConstantNode(new BooleanValue(false)));
    }

    /** a || b rewritten as if (a) true else b. */
    private IfNode orByIfNode(ExpressionNode a, ExpressionNode b) {
        return new IfNode(a, new ConstantNode(new BooleanValue(true)), b);
    }

    /** A child with the operator to be applied to it when combining it with the previous child. */
    private static class ChildNode {

        final ArithmeticOperator op;
        // Mutable: popStack replaces this with the combined subtree.
        ExpressionNode child;

        public ChildNode(ArithmeticOperator op, ExpressionNode child) {
            this.op = op;
            this.child = child;
        }

        @Override
        public String toString() { return child.toString(); }

    }

}
/**
 * Transforms boolean operators in ranking expressions into equivalent short-circuiting
 * if-nodes: a && b becomes if(a) b else false, and a || b becomes if(a) true else b.
 * NOTE(review): transform calls transformBooleanArithmetics, which is defined alongside
 * this class but not shown within it — confirm it lives in the same compilation unit.
 */
class BooleanExpressionTransformer extends ExpressionTransformer<TransformContext> {

    @Override
    public ExpressionNode transform(ExpressionNode node, TransformContext context) {
        // Bottom-up: rewrite children first, then this node if it is an arithmetic node.
        if (node instanceof CompositeNode composite)
            node = transformChildren(composite, context);
        if (node instanceof ArithmeticNode arithmetic)
            node = transformBooleanArithmetics(arithmetic);
        return node;
    }

    /**
     * Combines the top two stack entries into one. The right-hand side's operator decides
     * the combination: AND and OR become if-nodes (short-circuit semantics), any other
     * operator becomes a plain two-child arithmetic node. The combined node is written
     * back into the new top entry's child via peek(), so the stack shrinks by exactly one.
     */
    private void popStack(Deque<ChildNode> stack) {
        ChildNode rhs = stack.pop();
        ChildNode lhs = stack.peek();
        ExpressionNode combination;
        if (rhs.op == ArithmeticOperator.AND) combination = andByIfNode(lhs.child, rhs.child);
        else if (rhs.op == ArithmeticOperator.OR) combination = orByIfNode(lhs.child, rhs.child);
        else combination = new ArithmeticNode(List.of(lhs.child, rhs.child), List.of(rhs.op));
        lhs.child = combination;
    }

    /** a && b rewritten as if (a) b else false. */
    private IfNode andByIfNode(ExpressionNode a, ExpressionNode b) {
        return new IfNode(a, b, new ConstantNode(new BooleanValue(false)));
    }

    /** a || b rewritten as if (a) true else b. */
    private IfNode orByIfNode(ExpressionNode a, ExpressionNode b) {
        return new IfNode(a, new ConstantNode(new BooleanValue(true)), b);
    }

    /** A child with the operator to be applied to it when combining it with the previous child. */
    private static class ChildNode {

        final ArithmeticOperator op;
        // Mutable: popStack replaces this with the combined subtree.
        ExpressionNode child;

        public ChildNode(ArithmeticOperator op, ExpressionNode child) {
            this.op = op;
            this.child = child;
        }

        @Override
        public String toString() { return child.toString(); }

    }

}
It'll make this code uglier, and nobody is going to add together fifty thousand things the way that test does, but I'll give it a shot and see if it can be done reasonably nicely without changing everything.
/**
 * Rewrites a flat arithmetic node (children plus infix operators) into a precedence-correct
 * binary tree, delegating to popStack to combine adjacent children (where AND/OR become
 * short-circuiting if-nodes).
 *
 * Uses an operator-precedence (shunting-yard style) stack reduction: equal-precedence
 * operators associate left-to-right, since we reduce while the incoming operator does
 * not bind strictly tighter than the stack top.
 *
 * @param node the flat arithmetic node to restructure
 * @return the root of the restructured expression tree
 */
private ExpressionNode transformBooleanArithmetics(ArithmeticNode node) {
    Iterator<ExpressionNode> child = node.children().iterator();
    Deque<ChildNode> stack = new ArrayDeque<>();
    // Sentinel operator: the first child is never combined as a right-hand side
    // (popStack only fires while size > 1), so OR here is inert.
    stack.push(new ChildNode(ArithmeticOperator.OR, child.next()));
    for (Iterator<ArithmeticOperator> it = node.operators().iterator(); it.hasNext() && child.hasNext();) {
        ArithmeticOperator op = it.next();
        // The stack always holds at least the sentinel entry here (the former
        // !stack.isEmpty() guard was dead code), so reduce directly: combine while
        // the incoming operator does not bind tighter than the stack top.
        while (stack.size() > 1 && ! op.hasPrecedenceOver(stack.peek().op)) {
            popStack(stack);
        }
        stack.push(new ChildNode(op, child.next()));
    }
    // Fold whatever remains into a single root.
    while (stack.size() > 1)
        popStack(stack);
    return stack.getFirst().child;
}
stack.push(new ChildNode(ArithmeticOperator.OR, child.next()));
/**
 * Rewrites a flat arithmetic node (children plus infix operators) into a precedence-correct
 * binary tree, delegating to popStack to combine adjacent children (where AND/OR become
 * short-circuiting if-nodes).
 *
 * Uses an operator-precedence (shunting-yard style) stack reduction: equal-precedence
 * operators associate left-to-right, since we reduce while the incoming operator does
 * not bind strictly tighter than the stack top.
 *
 * @param node the flat arithmetic node to restructure
 * @return the root of the restructured expression tree
 */
private ExpressionNode transformBooleanArithmetics(ArithmeticNode node) {
    Iterator<ExpressionNode> child = node.children().iterator();
    Deque<ChildNode> stack = new ArrayDeque<>();
    // Sentinel operator: the first child is never combined as a right-hand side
    // (popStack only fires while size > 1), so OR here is inert.
    stack.push(new ChildNode(ArithmeticOperator.OR, child.next()));
    for (Iterator<ArithmeticOperator> it = node.operators().iterator(); it.hasNext() && child.hasNext();) {
        ArithmeticOperator op = it.next();
        // The stack always holds at least the sentinel entry here (the former
        // !stack.isEmpty() guard was dead code), so reduce directly: combine while
        // the incoming operator does not bind tighter than the stack top.
        while (stack.size() > 1 && ! op.hasPrecedenceOver(stack.peek().op)) {
            popStack(stack);
        }
        stack.push(new ChildNode(op, child.next()));
    }
    // Fold whatever remains into a single root.
    while (stack.size() > 1)
        popStack(stack);
    return stack.getFirst().child;
}
/**
 * Transforms boolean operators in ranking expressions into equivalent short-circuiting
 * if-nodes: a && b becomes if(a) b else false, and a || b becomes if(a) true else b.
 * NOTE(review): transform calls transformBooleanArithmetics, which is defined alongside
 * this class but not shown within it — confirm it lives in the same compilation unit.
 */
class BooleanExpressionTransformer extends ExpressionTransformer<TransformContext> {

    @Override
    public ExpressionNode transform(ExpressionNode node, TransformContext context) {
        // Bottom-up: rewrite children first, then this node if it is an arithmetic node.
        if (node instanceof CompositeNode composite)
            node = transformChildren(composite, context);
        if (node instanceof ArithmeticNode arithmetic)
            node = transformBooleanArithmetics(arithmetic);
        return node;
    }

    /**
     * Combines the top two stack entries into one. The right-hand side's operator decides
     * the combination: AND and OR become if-nodes (short-circuit semantics), any other
     * operator becomes a plain two-child arithmetic node. The combined node is written
     * back into the new top entry's child via peek(), so the stack shrinks by exactly one.
     */
    private void popStack(Deque<ChildNode> stack) {
        ChildNode rhs = stack.pop();
        ChildNode lhs = stack.peek();
        ExpressionNode combination;
        if (rhs.op == ArithmeticOperator.AND) combination = andByIfNode(lhs.child, rhs.child);
        else if (rhs.op == ArithmeticOperator.OR) combination = orByIfNode(lhs.child, rhs.child);
        else combination = new ArithmeticNode(List.of(lhs.child, rhs.child), List.of(rhs.op));
        lhs.child = combination;
    }

    /** a && b rewritten as if (a) b else false. */
    private IfNode andByIfNode(ExpressionNode a, ExpressionNode b) {
        return new IfNode(a, b, new ConstantNode(new BooleanValue(false)));
    }

    /** a || b rewritten as if (a) true else b. */
    private IfNode orByIfNode(ExpressionNode a, ExpressionNode b) {
        return new IfNode(a, new ConstantNode(new BooleanValue(true)), b);
    }

    /** A child with the operator to be applied to it when combining it with the previous child. */
    private static class ChildNode {

        final ArithmeticOperator op;
        // Mutable: popStack replaces this with the combined subtree.
        ExpressionNode child;

        public ChildNode(ArithmeticOperator op, ExpressionNode child) {
            this.op = op;
            this.child = child;
        }

        @Override
        public String toString() { return child.toString(); }

    }

}
/**
 * Transforms boolean operators in ranking expressions into equivalent short-circuiting
 * if-nodes: a && b becomes if(a) b else false, and a || b becomes if(a) true else b.
 * NOTE(review): transform calls transformBooleanArithmetics, which is defined alongside
 * this class but not shown within it — confirm it lives in the same compilation unit.
 */
class BooleanExpressionTransformer extends ExpressionTransformer<TransformContext> {

    @Override
    public ExpressionNode transform(ExpressionNode node, TransformContext context) {
        // Bottom-up: rewrite children first, then this node if it is an arithmetic node.
        if (node instanceof CompositeNode composite)
            node = transformChildren(composite, context);
        if (node instanceof ArithmeticNode arithmetic)
            node = transformBooleanArithmetics(arithmetic);
        return node;
    }

    /**
     * Combines the top two stack entries into one. The right-hand side's operator decides
     * the combination: AND and OR become if-nodes (short-circuit semantics), any other
     * operator becomes a plain two-child arithmetic node. The combined node is written
     * back into the new top entry's child via peek(), so the stack shrinks by exactly one.
     */
    private void popStack(Deque<ChildNode> stack) {
        ChildNode rhs = stack.pop();
        ChildNode lhs = stack.peek();
        ExpressionNode combination;
        if (rhs.op == ArithmeticOperator.AND) combination = andByIfNode(lhs.child, rhs.child);
        else if (rhs.op == ArithmeticOperator.OR) combination = orByIfNode(lhs.child, rhs.child);
        else combination = new ArithmeticNode(List.of(lhs.child, rhs.child), List.of(rhs.op));
        lhs.child = combination;
    }

    /** a && b rewritten as if (a) b else false. */
    private IfNode andByIfNode(ExpressionNode a, ExpressionNode b) {
        return new IfNode(a, b, new ConstantNode(new BooleanValue(false)));
    }

    /** a || b rewritten as if (a) true else b. */
    private IfNode orByIfNode(ExpressionNode a, ExpressionNode b) {
        return new IfNode(a, new ConstantNode(new BooleanValue(true)), b);
    }

    /** A child with the operator to be applied to it when combining it with the previous child. */
    private static class ChildNode {

        final ArithmeticOperator op;
        // Mutable: popStack replaces this with the combined subtree.
        ExpressionNode child;

        public ChildNode(ArithmeticOperator op, ExpressionNode child) {
            this.op = op;
            this.child = child;
        }

        @Override
        public String toString() { return child.toString(); }

    }

}
I found a way that is just a minor change. PR is coming.
/**
 * Rewrites a flat arithmetic node (children plus infix operators) into a precedence-correct
 * binary tree, delegating to popStack to combine adjacent children (where AND/OR become
 * short-circuiting if-nodes).
 *
 * Uses an operator-precedence (shunting-yard style) stack reduction: equal-precedence
 * operators associate left-to-right, since we reduce while the incoming operator does
 * not bind strictly tighter than the stack top.
 *
 * @param node the flat arithmetic node to restructure
 * @return the root of the restructured expression tree
 */
private ExpressionNode transformBooleanArithmetics(ArithmeticNode node) {
    Iterator<ExpressionNode> child = node.children().iterator();
    Deque<ChildNode> stack = new ArrayDeque<>();
    // Sentinel operator: the first child is never combined as a right-hand side
    // (popStack only fires while size > 1), so OR here is inert.
    stack.push(new ChildNode(ArithmeticOperator.OR, child.next()));
    for (Iterator<ArithmeticOperator> it = node.operators().iterator(); it.hasNext() && child.hasNext();) {
        ArithmeticOperator op = it.next();
        // The stack always holds at least the sentinel entry here (the former
        // !stack.isEmpty() guard was dead code), so reduce directly: combine while
        // the incoming operator does not bind tighter than the stack top.
        while (stack.size() > 1 && ! op.hasPrecedenceOver(stack.peek().op)) {
            popStack(stack);
        }
        stack.push(new ChildNode(op, child.next()));
    }
    // Fold whatever remains into a single root.
    while (stack.size() > 1)
        popStack(stack);
    return stack.getFirst().child;
}
stack.push(new ChildNode(ArithmeticOperator.OR, child.next()));
/**
 * Rewrites a flat arithmetic node (children plus infix operators) into a precedence-correct
 * binary tree, delegating to popStack to combine adjacent children (where AND/OR become
 * short-circuiting if-nodes).
 *
 * Uses an operator-precedence (shunting-yard style) stack reduction: equal-precedence
 * operators associate left-to-right, since we reduce while the incoming operator does
 * not bind strictly tighter than the stack top.
 *
 * @param node the flat arithmetic node to restructure
 * @return the root of the restructured expression tree
 */
private ExpressionNode transformBooleanArithmetics(ArithmeticNode node) {
    Iterator<ExpressionNode> child = node.children().iterator();
    Deque<ChildNode> stack = new ArrayDeque<>();
    // Sentinel operator: the first child is never combined as a right-hand side
    // (popStack only fires while size > 1), so OR here is inert.
    stack.push(new ChildNode(ArithmeticOperator.OR, child.next()));
    for (Iterator<ArithmeticOperator> it = node.operators().iterator(); it.hasNext() && child.hasNext();) {
        ArithmeticOperator op = it.next();
        // The stack always holds at least the sentinel entry here (the former
        // !stack.isEmpty() guard was dead code), so reduce directly: combine while
        // the incoming operator does not bind tighter than the stack top.
        while (stack.size() > 1 && ! op.hasPrecedenceOver(stack.peek().op)) {
            popStack(stack);
        }
        stack.push(new ChildNode(op, child.next()));
    }
    // Fold whatever remains into a single root.
    while (stack.size() > 1)
        popStack(stack);
    return stack.getFirst().child;
}
class BooleanExpressionTransformer extends ExpressionTransformer<TransformContext> { @Override public ExpressionNode transform(ExpressionNode node, TransformContext context) { if (node instanceof CompositeNode composite) node = transformChildren(composite, context); if (node instanceof ArithmeticNode arithmetic) node = transformBooleanArithmetics(arithmetic); return node; } private void popStack(Deque<ChildNode> stack) { ChildNode rhs = stack.pop(); ChildNode lhs = stack.peek(); ExpressionNode combination; if (rhs.op == ArithmeticOperator.AND) combination = andByIfNode(lhs.child, rhs.child); else if (rhs.op == ArithmeticOperator.OR) combination = orByIfNode(lhs.child, rhs.child); else combination = new ArithmeticNode(List.of(lhs.child, rhs.child), List.of(rhs.op)); lhs.child = combination; } private IfNode andByIfNode(ExpressionNode a, ExpressionNode b) { return new IfNode(a, b, new ConstantNode(new BooleanValue(false))); } private IfNode orByIfNode(ExpressionNode a, ExpressionNode b) { return new IfNode(a, new ConstantNode(new BooleanValue(true)), b); } /** A child with the operator to be applied to it when combining it with the previous child. */ private static class ChildNode { final ArithmeticOperator op; ExpressionNode child; public ChildNode(ArithmeticOperator op, ExpressionNode child) { this.op = op; this.child = child; } @Override public String toString() { return child.toString(); } } }
class BooleanExpressionTransformer extends ExpressionTransformer<TransformContext> { @Override public ExpressionNode transform(ExpressionNode node, TransformContext context) { if (node instanceof CompositeNode composite) node = transformChildren(composite, context); if (node instanceof ArithmeticNode arithmetic) node = transformBooleanArithmetics(arithmetic); return node; } private void popStack(Deque<ChildNode> stack) { ChildNode rhs = stack.pop(); ChildNode lhs = stack.peek(); ExpressionNode combination; if (rhs.op == ArithmeticOperator.AND) combination = andByIfNode(lhs.child, rhs.child); else if (rhs.op == ArithmeticOperator.OR) combination = orByIfNode(lhs.child, rhs.child); else combination = new ArithmeticNode(List.of(lhs.child, rhs.child), List.of(rhs.op)); lhs.child = combination; } private IfNode andByIfNode(ExpressionNode a, ExpressionNode b) { return new IfNode(a, b, new ConstantNode(new BooleanValue(false))); } private IfNode orByIfNode(ExpressionNode a, ExpressionNode b) { return new IfNode(a, new ConstantNode(new BooleanValue(true)), b); } /** A child with the operator to be applied to it when combining it with the previous child. */ private static class ChildNode { final ArithmeticOperator op; ExpressionNode child; public ChildNode(ArithmeticOperator op, ExpressionNode child) { this.op = op; this.child = child; } @Override public String toString() { return child.toString(); } } }
Not sure the default should be limited, though? Well, maybe, since we use this only in hosted ...
public AsyncHttpResponse handle(HttpRequest request) { Instant from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf).map(Instant::ofEpochMilli).orElse(Instant.MIN); Instant to = Optional.ofNullable(request.getProperty("to")) .map(Long::valueOf).map(Instant::ofEpochMilli).orElse(Instant.MAX); long maxLines = Optional.ofNullable(request.getProperty("maxLines")) .map(Long::valueOf).orElse(100_000L); Optional<String> hostname = Optional.ofNullable(request.getProperty("hostname")); return new AsyncHttpResponse(200) { @Override public long maxPendingBytes() { return MB; } @Override public void render(OutputStream output, ContentChannel networkChannel, CompletionHandler handler) { try (output) { logReader.writeLogs(output, from, to, maxLines, hostname); } catch (Throwable t) { log.log(Level.WARNING, "Failed reading logs from " + from + " to " + to, t); } finally { networkChannel.close(handler); } } }; }
long maxLines = Optional.ofNullable(request.getProperty("maxLines"))
public AsyncHttpResponse handle(HttpRequest request) { Instant from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf).map(Instant::ofEpochMilli).orElse(Instant.MIN); Instant to = Optional.ofNullable(request.getProperty("to")) .map(Long::valueOf).map(Instant::ofEpochMilli).orElse(Instant.MAX); long maxLines = Optional.ofNullable(request.getProperty("maxLines")) .map(Long::valueOf).orElse(100_000L); Optional<String> hostname = Optional.ofNullable(request.getProperty("hostname")); return new AsyncHttpResponse(200) { @Override public long maxPendingBytes() { return MB; } @Override public void render(OutputStream output, ContentChannel networkChannel, CompletionHandler handler) { try (output) { logReader.writeLogs(output, from, to, maxLines, hostname); } catch (Throwable t) { log.log(Level.WARNING, "Failed reading logs from " + from + " to " + to, t); } finally { networkChannel.close(handler); } } }; }
class LogHandler extends ThreadedHttpRequestHandler { private final LogReader logReader; private static final long MB = 1024 * 1024; @Inject public LogHandler(Executor executor, LogHandlerConfig config) { this(executor, new LogReader(config.logDirectory(), config.logPattern())); } LogHandler(Executor executor, LogReader logReader) { super(executor); this.logReader = logReader; } @Override }
class LogHandler extends ThreadedHttpRequestHandler { private final LogReader logReader; private static final long MB = 1024 * 1024; @Inject public LogHandler(Executor executor, LogHandlerConfig config) { this(executor, new LogReader(config.logDirectory(), config.logPattern())); } LogHandler(Executor executor, LogReader logReader) { super(executor); this.logReader = logReader; } @Override }
It looks like the logserver node was dying (out of memory?) even though we have streaming, so having a limit by default makes sense. But I guess the primary user of this API is console anyway, which will set the limit anyway.
public AsyncHttpResponse handle(HttpRequest request) { Instant from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf).map(Instant::ofEpochMilli).orElse(Instant.MIN); Instant to = Optional.ofNullable(request.getProperty("to")) .map(Long::valueOf).map(Instant::ofEpochMilli).orElse(Instant.MAX); long maxLines = Optional.ofNullable(request.getProperty("maxLines")) .map(Long::valueOf).orElse(100_000L); Optional<String> hostname = Optional.ofNullable(request.getProperty("hostname")); return new AsyncHttpResponse(200) { @Override public long maxPendingBytes() { return MB; } @Override public void render(OutputStream output, ContentChannel networkChannel, CompletionHandler handler) { try (output) { logReader.writeLogs(output, from, to, maxLines, hostname); } catch (Throwable t) { log.log(Level.WARNING, "Failed reading logs from " + from + " to " + to, t); } finally { networkChannel.close(handler); } } }; }
long maxLines = Optional.ofNullable(request.getProperty("maxLines"))
public AsyncHttpResponse handle(HttpRequest request) { Instant from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf).map(Instant::ofEpochMilli).orElse(Instant.MIN); Instant to = Optional.ofNullable(request.getProperty("to")) .map(Long::valueOf).map(Instant::ofEpochMilli).orElse(Instant.MAX); long maxLines = Optional.ofNullable(request.getProperty("maxLines")) .map(Long::valueOf).orElse(100_000L); Optional<String> hostname = Optional.ofNullable(request.getProperty("hostname")); return new AsyncHttpResponse(200) { @Override public long maxPendingBytes() { return MB; } @Override public void render(OutputStream output, ContentChannel networkChannel, CompletionHandler handler) { try (output) { logReader.writeLogs(output, from, to, maxLines, hostname); } catch (Throwable t) { log.log(Level.WARNING, "Failed reading logs from " + from + " to " + to, t); } finally { networkChannel.close(handler); } } }; }
class LogHandler extends ThreadedHttpRequestHandler { private final LogReader logReader; private static final long MB = 1024 * 1024; @Inject public LogHandler(Executor executor, LogHandlerConfig config) { this(executor, new LogReader(config.logDirectory(), config.logPattern())); } LogHandler(Executor executor, LogReader logReader) { super(executor); this.logReader = logReader; } @Override }
class LogHandler extends ThreadedHttpRequestHandler { private final LogReader logReader; private static final long MB = 1024 * 1024; @Inject public LogHandler(Executor executor, LogHandlerConfig config) { this(executor, new LogReader(config.logDirectory(), config.logPattern())); } LogHandler(Executor executor, LogReader logReader) { super(executor); this.logReader = logReader; } @Override }
Not really any point of evaluating at all without this since all expressions will just be a bunch of operations on zeros, which will all come out as zero.
public void testNotSkewingNonBoolean() throws Exception { assertTransformed("a + b + c * d + e + f", "a + b + c * d + e + f"); var expr = new BooleanExpressionTransformer() .transform(new RankingExpression("a + b + c * d + e + f"), new TransformContext(Map.of(), new MapTypeContext())); assertTrue(expr.getRoot() instanceof ArithmeticNode); ArithmeticNode root = (ArithmeticNode) expr.getRoot(); assertEquals(5, root.operators().size()); assertEquals(6, root.children().size()); }
var expr = new BooleanExpressionTransformer()
public void testNotSkewingNonBoolean() throws Exception { assertTransformed("a + b + c * d + e + f", "a + b + c * d + e + f"); var expr = new BooleanExpressionTransformer() .transform(new RankingExpression("a + b + c * d + e + f"), new TransformContext(Map.of(), new MapTypeContext())); assertTrue(expr.getRoot() instanceof ArithmeticNode); ArithmeticNode root = (ArithmeticNode) expr.getRoot(); assertEquals(5, root.operators().size()); assertEquals(6, root.children().size()); }
class BooleanExpressionTransformerTestCase { @Test public void testTransformer() throws Exception { assertTransformed("if (a, b, false)", "a && b"); assertTransformed("if (a, true, b)", "a || b"); assertTransformed("if (a, true, b + c)", "a || b + c"); assertTransformed("if (c + a, true, b)", "c + a || b"); assertTransformed("if (c + a, true, b + c)", "c + a || b + c"); assertTransformed("if (a + b, true, if (c - d * e, f, false))", "a + b || c - d * e && f"); assertTransformed("if (a, true, if (b, c, false))", "a || b && c"); assertTransformed("if (a + b, true, if (if (c, d, false), e * f - g, false))", "a + b || c && d && e * f - g"); assertTransformed("if(1 - 1, true, 1 - 1)", "1 - 1 || 1 - 1"); } @Test public void testIt() throws Exception { assertTransformed("if(1 - 1, true, 1 - 1)", "1 - 1 || 1 - 1"); } @Test @Test public void testTransformPreservesPrecedence() throws Exception { assertUnTransformed("a"); assertUnTransformed("a + b"); assertUnTransformed("a + b + c"); assertUnTransformed("a * b"); assertUnTransformed("a + b * c + d"); assertUnTransformed("a + b + c * d + e + f"); assertUnTransformed("a * b + c + d + e * f"); assertUnTransformed("(a * b) + c + d + e * f"); assertUnTransformed("(a * b + c) + d + e * f"); assertUnTransformed("a * (b + c) + d + e * f"); assertUnTransformed("(a * b) + (c + (d + e)) * f"); } private void assertUnTransformed(String input) throws Exception { assertTransformed(input, input); } private void assertTransformed(String expected, String input) throws Exception { var transformedExpression = new BooleanExpressionTransformer() .transform(new RankingExpression(input), new TransformContext(Map.of(), new MapTypeContext())); assertEquals(new RankingExpression(expected), transformedExpression, "Transformed as expected"); var inputExpression = new RankingExpression(input); assertEquals(inputExpression.evaluate(new MapContext()).asBoolean(), transformedExpression.evaluate(new MapContext()).asBoolean(), "Transform and original input 
are equivalent"); } }
class BooleanExpressionTransformerTestCase { @Test public void testTransformer() throws Exception { assertTransformed("if (a, b, false)", "a && b"); assertTransformed("if (a, true, b)", "a || b"); assertTransformed("if (a, true, b + c)", "a || b + c"); assertTransformed("if (c + a, true, b)", "c + a || b"); assertTransformed("if (c + a, true, b + c)", "c + a || b + c"); assertTransformed("if (a + b, true, if (c - d * e, f, false))", "a + b || c - d * e && f"); assertTransformed("if (a, true, if (b, c, false))", "a || b && c"); assertTransformed("if (a + b, true, if (if (c, d, false), e * f - g, false))", "a + b || c && d && e * f - g"); assertTransformed("if(1 - 1, true, 1 - 1)", "1 - 1 || 1 - 1"); } @Test public void testIt() throws Exception { assertTransformed("if(1 - 1, true, 1 - 1)", "1 - 1 || 1 - 1"); } @Test @Test public void testTransformPreservesPrecedence() throws Exception { assertUnTransformed("a"); assertUnTransformed("a + b"); assertUnTransformed("a + b + c"); assertUnTransformed("a * b"); assertUnTransformed("a + b * c + d"); assertUnTransformed("a + b + c * d + e + f"); assertUnTransformed("a * b + c + d + e * f"); assertUnTransformed("(a * b) + c + d + e * f"); assertUnTransformed("(a * b + c) + d + e * f"); assertUnTransformed("a * (b + c) + d + e * f"); assertUnTransformed("(a * b) + (c + (d + e)) * f"); } private void assertUnTransformed(String input) throws Exception { assertTransformed(input, input); } private void assertTransformed(String expected, String input) throws Exception { var transformedExpression = new BooleanExpressionTransformer() .transform(new RankingExpression(input), new TransformContext(Map.of(), new MapTypeContext())); assertEquals(new RankingExpression(expected), transformedExpression, "Transformed as expected"); MapContext context = contextWithSingleLetterVariables(); var inputExpression = new RankingExpression(input); assertEquals(inputExpression.evaluate(context).asBoolean(), 
transformedExpression.evaluate(context).asBoolean(), "Transform and original input are equivalent"); } private MapContext contextWithSingleLetterVariables() { var context = new MapContext(); for (int i = 0; i < 26; i++) context.put(Character.toString(i + 97), Math.floorMod(i, 2)); return context; } }
I suspected so :) I removed since it was not used. Now it is actually used too.
public void testNotSkewingNonBoolean() throws Exception { assertTransformed("a + b + c * d + e + f", "a + b + c * d + e + f"); var expr = new BooleanExpressionTransformer() .transform(new RankingExpression("a + b + c * d + e + f"), new TransformContext(Map.of(), new MapTypeContext())); assertTrue(expr.getRoot() instanceof ArithmeticNode); ArithmeticNode root = (ArithmeticNode) expr.getRoot(); assertEquals(5, root.operators().size()); assertEquals(6, root.children().size()); }
var expr = new BooleanExpressionTransformer()
public void testNotSkewingNonBoolean() throws Exception { assertTransformed("a + b + c * d + e + f", "a + b + c * d + e + f"); var expr = new BooleanExpressionTransformer() .transform(new RankingExpression("a + b + c * d + e + f"), new TransformContext(Map.of(), new MapTypeContext())); assertTrue(expr.getRoot() instanceof ArithmeticNode); ArithmeticNode root = (ArithmeticNode) expr.getRoot(); assertEquals(5, root.operators().size()); assertEquals(6, root.children().size()); }
class BooleanExpressionTransformerTestCase { @Test public void testTransformer() throws Exception { assertTransformed("if (a, b, false)", "a && b"); assertTransformed("if (a, true, b)", "a || b"); assertTransformed("if (a, true, b + c)", "a || b + c"); assertTransformed("if (c + a, true, b)", "c + a || b"); assertTransformed("if (c + a, true, b + c)", "c + a || b + c"); assertTransformed("if (a + b, true, if (c - d * e, f, false))", "a + b || c - d * e && f"); assertTransformed("if (a, true, if (b, c, false))", "a || b && c"); assertTransformed("if (a + b, true, if (if (c, d, false), e * f - g, false))", "a + b || c && d && e * f - g"); assertTransformed("if(1 - 1, true, 1 - 1)", "1 - 1 || 1 - 1"); } @Test public void testIt() throws Exception { assertTransformed("if(1 - 1, true, 1 - 1)", "1 - 1 || 1 - 1"); } @Test @Test public void testTransformPreservesPrecedence() throws Exception { assertUnTransformed("a"); assertUnTransformed("a + b"); assertUnTransformed("a + b + c"); assertUnTransformed("a * b"); assertUnTransformed("a + b * c + d"); assertUnTransformed("a + b + c * d + e + f"); assertUnTransformed("a * b + c + d + e * f"); assertUnTransformed("(a * b) + c + d + e * f"); assertUnTransformed("(a * b + c) + d + e * f"); assertUnTransformed("a * (b + c) + d + e * f"); assertUnTransformed("(a * b) + (c + (d + e)) * f"); } private void assertUnTransformed(String input) throws Exception { assertTransformed(input, input); } private void assertTransformed(String expected, String input) throws Exception { var transformedExpression = new BooleanExpressionTransformer() .transform(new RankingExpression(input), new TransformContext(Map.of(), new MapTypeContext())); assertEquals(new RankingExpression(expected), transformedExpression, "Transformed as expected"); var inputExpression = new RankingExpression(input); assertEquals(inputExpression.evaluate(new MapContext()).asBoolean(), transformedExpression.evaluate(new MapContext()).asBoolean(), "Transform and original input 
are equivalent"); } }
class BooleanExpressionTransformerTestCase { @Test public void testTransformer() throws Exception { assertTransformed("if (a, b, false)", "a && b"); assertTransformed("if (a, true, b)", "a || b"); assertTransformed("if (a, true, b + c)", "a || b + c"); assertTransformed("if (c + a, true, b)", "c + a || b"); assertTransformed("if (c + a, true, b + c)", "c + a || b + c"); assertTransformed("if (a + b, true, if (c - d * e, f, false))", "a + b || c - d * e && f"); assertTransformed("if (a, true, if (b, c, false))", "a || b && c"); assertTransformed("if (a + b, true, if (if (c, d, false), e * f - g, false))", "a + b || c && d && e * f - g"); assertTransformed("if(1 - 1, true, 1 - 1)", "1 - 1 || 1 - 1"); } @Test public void testIt() throws Exception { assertTransformed("if(1 - 1, true, 1 - 1)", "1 - 1 || 1 - 1"); } @Test @Test public void testTransformPreservesPrecedence() throws Exception { assertUnTransformed("a"); assertUnTransformed("a + b"); assertUnTransformed("a + b + c"); assertUnTransformed("a * b"); assertUnTransformed("a + b * c + d"); assertUnTransformed("a + b + c * d + e + f"); assertUnTransformed("a * b + c + d + e * f"); assertUnTransformed("(a * b) + c + d + e * f"); assertUnTransformed("(a * b + c) + d + e * f"); assertUnTransformed("a * (b + c) + d + e * f"); assertUnTransformed("(a * b) + (c + (d + e)) * f"); } private void assertUnTransformed(String input) throws Exception { assertTransformed(input, input); } private void assertTransformed(String expected, String input) throws Exception { var transformedExpression = new BooleanExpressionTransformer() .transform(new RankingExpression(input), new TransformContext(Map.of(), new MapTypeContext())); assertEquals(new RankingExpression(expected), transformedExpression, "Transformed as expected"); MapContext context = contextWithSingleLetterVariables(); var inputExpression = new RankingExpression(input); assertEquals(inputExpression.evaluate(context).asBoolean(), 
transformedExpression.evaluate(context).asBoolean(), "Transform and original input are equivalent"); } private MapContext contextWithSingleLetterVariables() { var context = new MapContext(); for (int i = 0; i < 26; i++) context.put(Character.toString(i + 97), Math.floorMod(i, 2)); return context; } }
The condition should be either if we are in public, or if we can do `OAuthCredentials.fromOktaRequestContext(request.context())`
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic()) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } }
if (controller.system().isPublic()) {
private void ensureApplicationExists(TenantAndApplicationId id, HttpRequest request) { if (controller.applications().getApplication(id).isEmpty()) { if (controller.system().isPublic() || hasOktaContext(request)) { log.fine("Application does not exist in public, creating: " + id); var credentials = accessControlRequests.credentials(id.tenant(), null /* not used on public */ , request.getJDiscRequest()); controller.applications().createApplication(id, credentials); } else { log.fine("Application does not exist in hosted, failing: " + id); throw new IllegalArgumentException("Application does not exist. Create application in Console first."); } } }
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx, Controller controller, AccessControlRequests accessControlRequests) { super(parentCtx, controller.auditLogger()); this.controller = controller; this.accessControlRequests = accessControlRequests; this.testConfigSerializer = new TestConfigSerializer(controller.system()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse auditAndHandle(HttpRequest request) { try { Path path = new Path(request.getUri()); return switch (request.getMethod()) { case GET: yield handleGET(path, request); case PUT: yield handlePUT(path, request); case POST: yield handlePOST(path, request); case PATCH: yield handlePATCH(path, request); case DELETE: yield handleDELETE(path, request); case OPTIONS: yield handleOPTIONS(); default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); }; } catch (RestApiException.Forbidden e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (RestApiException.Unauthorized e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return switch (e.code()) { case NOT_FOUND: yield ErrorResponse.notFoundError(Exceptions.toMessageString(e)); case ACTIVATION_CONFLICT: yield new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e)); case INTERNAL_SERVER_ERROR: yield ErrorResponses.logThrowing(request, log, e); default: yield 
new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e)); }; } catch (RuntimeException e) { return ErrorResponses.logThrowing(request, log, e); } } private HttpResponse handleGET(Path path, HttpRequest request) { if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"), 
request.getProperty("allowMajor")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), 
path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId"))); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request); if 
(path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile); if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling); if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts); if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(Path path, HttpRequest request) { if 
(path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return 
deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); 
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), 
path.get("environment"), path.get("region"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(Path path, HttpRequest request) { if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant")); if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all"); 
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyResponse response = new EmptyResponse(); response.headers().put("Allow", 
"GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); List<Application> applications = controller.applications().asList(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).collect(toList()), request); return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return recurseOverTenants(request) ? recursiveRoot(request) : new ResourceResponse(request, "tenant"); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList(includeDeleted(request))) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request)) .map(tenant -> tenant(tenant, request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request); return new SlimeJsonResponse(slime); } private HttpResponse accessRequests(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var accessControlService = controller.serviceRegistry().accessControlService(); var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant); var managedAccess = accessControlService.getManagedAccess(tenant); var slime 
= new Slime(); var cursor = slime.setObject(); cursor.setBool("managedAccess", managedAccess); accessRoleInformation.getPendingRequest() .ifPresent(membershipRequest -> { var requestCursor = cursor.setObject("pendingRequest"); requestCursor.setString("requestTime", membershipRequest.getCreationTime()); requestCursor.setString("reason", membershipRequest.getReason()); }); var auditLogCursor = cursor.setArray("auditLog"); accessRoleInformation.getAuditLog() .forEach(auditLogEntry -> { var entryCursor = auditLogCursor.addObject(); entryCursor.setString("created", auditLogEntry.getCreationTime()); entryCursor.setString("approver", auditLogEntry.getApprover()); entryCursor.setString("reason", auditLogEntry.getReason()); entryCursor.setString("status", auditLogEntry.getAction()); }); return new SlimeJsonResponse(slime); } private HttpResponse requestSshAccess(String tenantName, HttpRequest request) { if (!isOperator(request)) { return ErrorResponse.forbidden("Only operators are allowed to request ssh access"); } if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only request access for cloud tenants"); controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName)); return new MessageResponse("OK"); } private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only see access requests for cloud tenants"); var inspector = toSlime(request.getData()).get(); var expiry = inspector.field("expiry").valid() ? 
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS); var approve = inspector.field("approve").asBool(); controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve); return new MessageResponse("OK"); } private HttpResponse addManagedAccess(String tenantName) { return setManagedAccess(tenantName, true); } private HttpResponse removeManagedAccess(String tenantName) { return setManagedAccess(tenantName, false); } private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) { var tenant = TenantName.from(tenantName); if (controller.tenants().require(tenant).type() != Tenant.Type.cloud) return ErrorResponse.badRequest("Can only set access privel for cloud tenants"); controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess); var slime = new Slime(); slime.setObject().setBool("managedAccess", managedAccess); return new SlimeJsonResponse(slime); } private HttpResponse tenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> handler.apply((CloudTenant) tenant)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) { Slime slime = new Slime(); Cursor infoCursor = slime.setObject(); if (!info.isEmpty()) { 
infoCursor.setString("name", info.name()); infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contact().name()); infoCursor.setString("contactEmail", info.contact().email()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); toSlime(info.contacts(), infoCursor); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var contact = root.setObject("contact"); contact.setString("name", info.contact().name()); contact.setString("email", info.contact().email()); var tenant = root.setObject("tenant"); tenant.setString("company", info.name()); tenant.setString("website", info.website()); toSlime(info.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) { return controller.tenants().get(tenantName) .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get())) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); } private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var mergedContact = TenantContact.empty() .withName(getString(inspector.field("contact").field("name"), info.contact().name())) .withEmail(getString(inspector.field("contact").field("email"), info.contact().email())); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address()); var mergedInfo = info .withName(getString(inspector.field("tenant").field("name"), info.name())) .withWebsite(getString(inspector.field("tenant").field("website"), info.website())) .withContact(mergedContact) 
.withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() .withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() 
.withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts())); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private void validateMergedTenantInfo(TenantInfo mergedInfo) { if (mergedInfo.contact().name().isBlank()) { throw new IllegalArgumentException("'contactName' cannot be empty"); } if (mergedInfo.contact().email().isBlank()) { throw new IllegalArgumentException("'contactEmail' cannot be empty"); } if (! mergedInfo.contact().email().contains("@")) { throw new IllegalArgumentException("'contactEmail' needs to be an email address"); } if (! mergedInfo.website().isBlank()) { try { new URL(mergedInfo.website()); } catch (MalformedURLException e) { throw new IllegalArgumentException("'website' needs to be a valid address"); } } } private void toSlime(TenantAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.address()); addressCursor.setString("postalCodeOrZip", address.code()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.region()); addressCursor.setString("country", address.country()); } private void toSlime(TenantBilling billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.contact().name()); addressCursor.setString("email", billingContact.contact().email()); addressCursor.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), addressCursor); } private void toSlime(TenantContacts contacts, Cursor parentCursor) { Cursor contactsCursor = parentCursor.setArray("contacts"); 
// Serializes each tenant contact with its audiences; only EMAIL contacts are implemented, any other type throws.
contacts.all().forEach(contact -> { Cursor contactCursor = contactsCursor.addObject(); Cursor audiencesArray = contactCursor.setArray("audiences"); contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience))); switch (contact.type()) { case EMAIL: var email = (TenantContacts.EmailContact) contact; contactCursor.setString("email", email.email()); return; default: throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type()); } }); }
// Maps the wire-format audience string to the TenantContacts.Audience enum; unknown values are rejected.
private static TenantContacts.Audience fromAudience(String value) { return switch (value) { case "tenant": yield TenantContacts.Audience.TENANT; case "notifications": yield TenantContacts.Audience.NOTIFICATIONS; default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'."); }; }
// Inverse of fromAudience: enum -> wire-format string. Exhaustive switch, so no default branch is needed.
private static String toAudience(TenantContacts.Audience audience) { return switch (audience) { case TENANT: yield "tenant"; case NOTIFICATIONS: yield "notifications"; }; }
// Updates tenant info; only cloud tenants support this, all others get a 404 response.
private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) { return controller.tenants().get(TenantName.from(tenantName)) .filter(tenant -> tenant.type() == Tenant.Type.cloud) .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this")); }
// Returns the trimmed string value of the field, or the given default when the field is absent.
// NOTE(review): parameter name 'defaultVale' looks like a typo for 'defaultValue' (local name only, does not affect callers).
private String getString(Inspector field, String defaultVale) { return field.valid() ? field.asString().trim() : defaultVale; }
// Merges the partial JSON payload into the existing tenant info (absent fields keep their old values),
// validates the merged result, then stores it under the tenant lock.
private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) { TenantInfo oldInfo = tenant.info(); Inspector insp = toSlime(request.getData()).get(); TenantContact mergedContact = TenantContact.empty() .withName(getString(insp.field("contactName"), oldInfo.contact().name())) .withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email())); TenantInfo mergedInfo = TenantInfo.empty() .withName(getString(insp.field("name"), oldInfo.name())) .withEmail(getString(insp.field("email"), oldInfo.email())) .withWebsite(getString(insp.field("website"), oldInfo.website())) .withContact(mergedContact) .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address())) .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact())) .withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts())); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); }
// Merges a JSON address into the old one. The merged address must be either completely blank or completely filled in; anything in between is rejected.
private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) { if (!insp.valid()) return oldAddress; TenantAddress address = TenantAddress.empty() .withCountry(getString(insp.field("country"), oldAddress.country())) .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region())) .withCity(getString(insp.field("city"), oldAddress.city())) .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code())) .withAddress(getString(insp.field("addressLines"), oldAddress.address())); List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region()); if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank)) return address; throw new IllegalArgumentException("All address fields must be set"); }
// Merges a JSON contact into the old one; a non-blank email must contain '@'.
// NOTE(review): the returned contact re-reads insp.field("email") instead of reusing the validated 'email' local - same value, just redundant.
private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) { if (!insp.valid()) return oldContact; String email = getString(insp.field("email"), oldContact.email()); if (!email.isBlank() && !email.contains("@")) { throw new IllegalArgumentException("'email' needs to be an email address"); } return TenantContact.empty() .withName(getString(insp.field("name"), oldContact.name())) .withEmail(getString(insp.field("email"), oldContact.email())) .withPhone(getString(insp.field("phone"), oldContact.phone())); }
// Merges billing info: contact fields come from the same JSON object, the address from its "address" child.
private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) { if (!insp.valid()) return oldContact; return TenantBilling.empty() .withContact(updateTenantInfoContact(insp, oldContact.contact())) .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address())); }
// Replaces the contact list wholesale from the JSON array (no per-entry merging); every entry's email must contain '@'.
private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) { if (!insp.valid()) return oldContacts; List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> { String email = inspector.field("email").asString().trim(); List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences")) .map(audience -> fromAudience(audience.asString())) .toList(); if (!email.contains("@")) { throw new IllegalArgumentException("'email' needs to be an email address"); } return new TenantContacts.EmailContact(audiences, email); }).toList(); return new TenantContacts(contacts); }
// Lists notifications, optionally scoped to one tenant, filtered by the request's application/instance/zone/job/type/level properties.
private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) { boolean productionOnly = showOnlyProductionInstances(request); boolean excludeMessages = "true".equals(request.getProperty("excludeMessages")); Slime slime = new Slime(); Cursor notificationsArray = slime.setObject().setArray("notifications"); tenant.map(t -> Stream.of(TenantName.from(t))) .orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream()) .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream()) .filter(notification -> propertyEquals(request, "application", ApplicationName::from, notification.source().application()) && propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) && propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) && propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) && propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) && propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level()))) .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages)); return new SlimeJsonResponse(slime); }
// True when the request property is unset; otherwise true only when the candidate value is present and equals the mapped property value.
private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) { return Optional.ofNullable(request.getProperty(property)) .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get())) .orElse(true); }
// Serializes one notification; messages can be omitted, and the tenant field is only included for cross-tenant listings.
private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) { cursor.setLong("at", notification.at().toEpochMilli()); cursor.setString("level", notificationLevelAsString(notification.level())); cursor.setString("type", notificationTypeAsString(notification.type())); if (!excludeMessages) { Cursor messagesArray = cursor.setArray("messages"); notification.messages().forEach(messagesArray::addString); } if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value()); notification.source().application().ifPresent(application -> cursor.setString("application", application.value())); notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value())); notification.source().zoneId().ifPresent(zoneId -> { cursor.setString("environment", zoneId.environment().value()); cursor.setString("region", zoneId.region().value()); }); notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value())); notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName())); notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber)); }
// Notification type -> API string; note that both submission and applicationPackage map to "applicationPackage".
private static String notificationTypeAsString(Notification.Type type) { return switch (type) { case submission, applicationPackage: yield "applicationPackage"; case testPackage: yield "testPackage"; case deployment: yield "deployment"; case feedBlock: yield "feedBlock"; case reindex: yield "reindex"; }; }
// Notification level -> API string (exhaustive switch).
private static String notificationLevelAsString(Notification.Level level) { return switch (level) { case info: yield "info"; case warning: yield "warning"; case error: yield "error"; }; }
// Lists all applications of a tenant, or the single named one (404 when it does not exist).
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); getTenantOrThrow(tenantName); List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) : controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get())) .map(List::of) .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist")); Slime slime = new Slime(); Cursor applicationArray = slime.setArray(); for (Application application : applications) { Cursor applicationObject = applicationArray.addObject(); applicationObject.setString("tenant", application.id().tenant().value()); applicationObject.setString("application", application.id().application().value()); applicationObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); Cursor instanceArray = applicationObject.setArray("instances"); for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet() : application.instances().keySet()) { Cursor instanceObject = instanceArray.addObject(); instanceObject.setString("instance", instance.value()); instanceObject.setString("url", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + instance.value(), request.getUri()).toString()); } } return new SlimeJsonResponse(slime); }
// Returns the application package of the last dev deployment to this job's zone as a zip download.
private HttpResponse devApplicationPackage(ApplicationId id, JobType type) { ZoneId zone = type.zone(); RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision(); byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision); return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage); }
// Returns the stored package diff for a given dev deployment run, or 404 when no diff exists.
private HttpResponse devApplicationPackageDiff(RunId runId) { DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone()); return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number()) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId)); }
// Downloads an application (or tester) package zip. 'build' may be a number, "latestDeployed", or absent (then the latest submitted build is used).
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName); final long build; String requestedBuild = request.getProperty("build"); if (requestedBuild != null) { if (requestedBuild.equals("latestDeployed")) { build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision() .map(RevisionId::number) .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication)); } else { try { build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L); } catch (NumberFormatException e) { throw new IllegalArgumentException("invalid value for request parameter 'build'", e); } } } else { build = controller.applications().requireApplication(tenantAndApplication).revisions().last() .map(version -> version.id().number()) .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication)); } RevisionId revision = RevisionId.forProduction(build); boolean tests = request.getBooleanProperty("tests"); byte[] applicationPackage = tests ? controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) : controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision); String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip"; return new ZipResponse(filename, applicationPackage); }
// Returns the stored package diff for a submitted build number, or 404.
private HttpResponse applicationPackageDiff(String tenant, String application, String number) { TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application); return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number)) .map(ByteArrayResponse::new) .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number)); }
// Serializes a single application (throws NotExistsException via getApplication when absent).
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); }
// Computes the compile version for an application, optionally restricted to the major version given by 'allowMajorParam'.
private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) { Slime slime = new Slime(); OptionalInt allowMajor = OptionalInt.empty(); if (allowMajorParam != null) { try { allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e); } } Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor); slime.setObject().setString("compileVersion", compileVersion.toFullString()); return new SlimeJsonResponse(slime); }
// Serializes a single instance together with its application's deployment status.
private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getInstance(tenantName, applicationName, instanceName), controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)), request); return new SlimeJsonResponse(slime); }
// Registers the calling user's public key as a developer key on a cloud tenant, and returns the updated key set.
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); Principal user = request.getJDiscRequest().getUserPrincipal(); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withDeveloperKey(developerKey, user); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); }
// Asks the config server to validate that the named secret store is readable from the given deployment.
private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) { var awsRegion = request.getProperty("aws-region"); var parameterName = request.getProperty("parameter-name"); var applicationId = ApplicationId.fromFullString(request.getProperty("application-id")); if (!applicationId.tenant().equals(TenantName.from(tenantName))) return ErrorResponse.badRequest("Invalid application id"); var zoneId = requireZone(ZoneId.from(request.getProperty("zone"))); var deploymentId = new DeploymentId(applicationId, zoneId); var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class); var tenantSecretStore = tenant.tenantSecretStores() .stream() .filter(secretStore -> secretStore.getName().equals(secretStoreName)) .findFirst(); if (tenantSecretStore.isEmpty()) return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'"); var
response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName); try { var responseRoot = new Slime(); var responseCursor = responseRoot.setObject(); responseCursor.setString("target", deploymentId.toString()); var responseResultCursor = responseCursor.setObject("result"); var responseSlime = SlimeUtils.jsonToSlime(response); SlimeUtils.copyObject(responseSlime.get(), responseResultCursor); return new SlimeJsonResponse(responseRoot); } catch (JsonParseException e) { return ErrorResponses.logThrowing(request, log, e); } }
// Removes the given developer key from a cloud tenant and returns the remaining key set.
private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString(); PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey); Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey); Slime root = new Slime(); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> { tenant = tenant.withoutDeveloperKey(developerKey); toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys()); controller.tenants().store(tenant); }); return new SlimeJsonResponse(root); }
// Serializes a key -> principal map as an array of {key: <pem>, user: <name>} objects.
private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) { keys.forEach((key, principal) -> { Cursor keyObject = keysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", principal.getName()); }); }
// Adds a deploy key to the application and returns the resulting PEM key set.
private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); }
// Removes a deploy key from the application and returns the remaining PEM key set.
private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) { String pemDeployKey = toSlime(request.getData()).get().field("key").asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); Slime root = new Slime(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { application = application.withoutDeployKey(deployKey); application.get().deployKeys().stream() .map(KeyUtils::toPem) .forEach(root.setObject().setArray("keys")::addString); controller.applications().store(application); }); return new SlimeJsonResponse(root); }
// Registers a new secret store on a cloud tenant: validates it, creates the tenant policy and the store (with the AWS external id), then persists it under lock and echoes the stores.
private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var awsId = mandatory("awsId", data).asString(); var externalId = mandatory("externalId", data).asString(); var role = mandatory("role", data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); }
// Deletes a secret store by name: removes the store and its tenant policy, then persists the change and returns the remaining stores.
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); }
// Sets the AWS role allowed to access this tenant's archive; the role must be non-blank.
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var role = mandatory("role", data).asString(); if (role.isBlank()) { return ErrorResponse.badRequest("AWS archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role)); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + "."); }
// Clears the tenant's AWS archive access role.
private HttpResponse removeAwsArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole()); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role removed for tenant " + tenantName + "."); }
// Sets the GCP member allowed to access this tenant's archive; mirrors allowAwsArchiveAccess.
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" +
tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var member = mandatory("member", data).asString(); if (member.isBlank()) { return ErrorResponse.badRequest("GCP archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member)); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + "."); }
// Clears the tenant's GCP archive access member.
private HttpResponse removeGcpArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember()); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member removed for tenant " + tenantName + "."); }
// Patches application-level settings: majorVersion (0 clears the pin) and pemDeployKey. Returns a summary of what changed.
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes."); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { Inspector majorVersionField = requestObject.field("majorVersion"); if (majorVersionField.valid()) { Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); application = application.withMajorVersion(majorVersion); messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion)); } Inspector pemDeployKeyField = requestObject.field("pemDeployKey"); if (pemDeployKeyField.valid()) { String pemDeployKey = pemDeployKeyField.asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); application = application.withDeployKey(deployKey); messageBuilder.add("Added deploy key " + pemDeployKey); } controller.applications().store(application); }); return new MessageResponse(messageBuilder.toString()); }
// Looks up an application, throwing NotExistsException when absent.
private Application getApplication(String tenantName, String applicationName) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); return controller.applications().getApplication(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); }
// Looks up an instance, throwing NotExistsException when absent.
private Instance getInstance(String tenantName, String applicationName, String instanceName) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); return controller.applications().getInstance(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); }
// Lists the nodes of a deployment with state, resources and cluster membership details.
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id)); Slime slime = new Slime(); Cursor nodesArray = slime.setObject().setArray("nodes"); for (Node node : nodes) { Cursor nodeObject = nodesArray.addObject(); nodeObject.setString("hostname", node.hostname().value()); nodeObject.setString("state", valueOf(node.state())); node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value())); nodeObject.setString("orchestration", valueOf(node.serviceState())); nodeObject.setString("version", node.currentVersion().toString()); node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor)); toSlime(node.resources(), nodeObject); nodeObject.setString("clusterId", node.clusterId()); nodeObject.setString("clusterType", valueOf(node.clusterType())); nodeObject.setBool("down", node.down()); nodeObject.setBool("retired", node.retired() || node.wantToRetire()); nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration()); nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration()); nodeObject.setString("group", node.group()); nodeObject.setLong("index", node.index()); } return new SlimeJsonResponse(slime); }
// Serializes the autoscaling state of every cluster in a deployment; 'target' is only included when it differs from 'current'.
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id); Slime slime = new Slime(); Cursor clustersObject = slime.setObject().setObject("clusters"); for (Cluster cluster : application.clusters().values()) { Cursor clusterObject = clustersObject.setObject(cluster.id().value()); clusterObject.setString("type", cluster.type().name()); toSlime(cluster.min(), clusterObject.setObject("min")); toSlime(cluster.max(), clusterObject.setObject("max")); toSlime(cluster.current(), clusterObject.setObject("current")); if (cluster.target().isPresent() && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers())) toSlime(cluster.target().get(), clusterObject.setObject("target")); cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested"))); utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization")); scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents")); clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode()); clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus()); clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis()); clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate()); clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax()); } return new SlimeJsonResponse(slime); }
// Node state -> API string; the default branch guards against new enum values leaking into the API unnamed.
private static String valueOf(Node.State state) { return switch (state) { case failed: yield "failed"; case parked: yield "parked"; case dirty: yield "dirty"; case ready: yield "ready"; case active: yield "active"; case inactive: yield "inactive"; case reserved: yield "reserved"; case provisioned: yield "provisioned"; case breakfixed: yield "breakfixed"; case deprovisioned: yield "deprovisioned"; default: throw new IllegalArgumentException("Unexpected node state '" + state + "'."); }; }
// Service (orchestration) state -> API string; 'unknown' falls through to the shared return.
static String valueOf(Node.ServiceState state) { switch (state) { case expectedUp: return "expectedUp"; case allowedDown: return "allowedDown"; case permanentlyDown: return "permanentlyDown"; case unorchestrated: return "unorchestrated"; case unknown: break; } return "unknown"; }
// Cluster type -> API string; 'unknown' is treated as a programming error here.
private static String valueOf(Node.ClusterType type) { return switch (type) { case admin: yield "admin"; case content: yield "content"; case container: yield "container"; case combined: yield "combined"; case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'."); }; }
// Disk speed -> API string (exhaustive switch).
private static String valueOf(NodeResources.DiskSpeed
diskSpeed) { return switch (diskSpeed) { case fast : yield "fast"; case slow : yield "slow"; case any : yield "any"; }; } private static String valueOf(NodeResources.StorageType storageType) { return switch (storageType) { case remote : yield "remote"; case local : yield "local"; case any : yield "any"; }; } private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { try (logStream) { logStream.transferTo(outputStream); } } @Override public long maxPendingBytes() { return 1 << 26; } }; } private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant())); } private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); Instant now = controller.clock().instant(); SupportAccess allowed = 
controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now)); } private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName()); controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant())); } private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment); return buildResponseFromProtonMetrics(protonMetrics); } private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { var from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.EPOCH); var until = Optional.ofNullable(request.getProperty("until")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.now(controller.clock())); var application = ApplicationId.from(tenantName, applicationName, instanceName); var zone = 
requireZone(environment, region); var deployment = new DeploymentId(application, zone); var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment); var slime = new Slime(); var root = slime.setObject(); for (var entry : events.entrySet()) { var serviceRoot = root.setArray(entry.getKey().clusterId().value()); scalingEventsToSlime(entry.getValue(), serviceRoot); } return new SlimeJsonResponse(slime); } private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) { try { var jsonObject = jsonMapper.createObjectNode(); var jsonArray = jsonMapper.createArrayNode(); for (ProtonMetrics metrics : protonMetrics) { jsonArray.add(metrics.toJson()); } jsonObject.set("metrics", jsonArray); return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject)); } catch (JsonProcessingException e) { log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e); return new JsonResponse(500, ""); } } private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { ZoneRegistry zones = controller.zoneRegistry(); type = switch (type.environment()) { case test -> JobType.systemTest(zones, zones.systemZone().getCloudName()); case staging -> JobType.stagingTest(zones, zones.systemZone().getCloudName()); default -> type; }; Inspector requestObject = toSlime(request.getData()).get(); boolean requireTests = ! requestObject.field("skipTests").asBool(); boolean reTrigger = requestObject.field("reTrigger").asBool(); boolean upgradeRevision = ! requestObject.field("skipRevision").asBool(); boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool(); String triggered = reTrigger ? 
controller.applications().deploymentTrigger() .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName() : controller.applications().deploymentTrigger() .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform) .stream().map(job -> job.type().jobName()).collect(joining(", ")); String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") + (upgradeRevision ? "" : "revision") + ( ! upgradeRevision && ! upgradePlatform ? " and " : "") + (upgradePlatform ? "" : "platform") + ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : ""); return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id + suppressedUpgrades); }
// Pauses the given job for this instance until now + DeploymentTrigger.maxPause (the longest pause the trigger allows).
private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); }
// Resumes (un-pauses) the given job for this instance via the deployment trigger.
private HttpResponse resume(ApplicationId id, JobType type) { controller.applications().deploymentTrigger().resumeJob(id, type); return new MessageResponse(type.jobName() + " for " + id + " resumed"); }
// Serializes top-level application data (tenant, application name, a link to its jobs, and — continuing
// past this point — latest version, changes, instances, metrics and activity) onto the given Slime cursor.
private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("tenant", application.id().tenant().value()); object.setString("application", application.id().application().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString()); DeploymentStatus status = controller.jobController().deploymentStatus(application); application.revisions().last().ifPresent(version -> 
JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version)); application.projectId().ifPresent(id -> object.setLong("projectId", id)); application.instances().values().stream().findFirst().ifPresent(instance -> { if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); }); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor instancesArray = object.setArray("instances"); for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values() : application.instances().values()) toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); 
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) { object.setString("instance", instance.name().value()); if (deploymentSpec.instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), status.application()); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application()); Cursor changeBlockers = object.setArray("changeBlockers"); deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } addRotationId(object, instance); List<Deployment> deployments = deploymentSpec.instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor deploymentsArray = object.setArray("deployments"); for (Deployment deployment : deployments) { Cursor deploymentObject = deploymentsArray.addObject(); if (deployment.zone().environment() == Environment.prod && ! 
instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } } private void addRotationId(Cursor object, Instance instance) { instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); } private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) { Application application = status.application(); object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.revisions().last().ifPresent(version -> { version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); }); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if (application.deploymentSpec().instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! 
instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); addRotationId(object, instance); List<Deployment> deployments = application.deploymentSpec().instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); if (deployment.zone().environment() == Environment.prod) { if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); } if ( ! recurseOverDeployments(request) && ! 
instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); } } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } Stream.concat(status.jobSteps().keySet().stream() .filter(job -> job.application().instance().equals(instance.name())) .filter(job -> job.type().isProduction() && job.type().isDeployment()), controller.jobController().active(instance.id()).stream() .map(run -> run.id().job()) .filter(job -> job.type().environment().isManuallyDeployed())) .map(job -> job.type().zone()) .filter(zone -> ! 
instance.deployments().containsKey(zone)) .forEach(zone -> { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", zone.environment().value()); deploymentObject.setString("region", zone.region().value()); }); application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key))); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region)); 
Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); }
// Serializes an ongoing change: the target platform version (if any) and/or the target application revision (if any).
private void toSlime(Cursor object, Change change, Application application) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision))); }
// Serializes a single endpoint: cluster, TLS flag, URL, scope, routing method and legacy flag.
private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); object.setBool("legacy", endpoint.legacy()); }
// Serializes a full deployment view: identifiers, endpoints (zone-scoped plus declared, optionally
// filtered to exclude legacy/non-direct ones when "includeLegacyEndpoints" is not set), and — continuing
// past this point — version, revision, timestamps, status, quota, cost, activity and metrics.
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())); boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints"); var endpointArray = response.setArray("endpoints"); EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId) .scope(Endpoint.Scope.zone); if (!legacyEndpoints) { zoneEndpoints = zoneEndpoints.not().legacy().direct(); } for (var 
endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); } EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId); if (!legacyEndpoints) { declaredEndpoints = declaredEndpoints.not().legacy().direct(); } for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); } response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString()); response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", application.revisions().get(deployment.revision()).stringId()); response.setLong("build", deployment.revision().number()); Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment); response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli())); application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); var instance = application.instances().get(deploymentId.applicationId().instance()); if (instance != null) { if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response); if (!deployment.zone().environment().isManuallyDeployed()) { DeploymentStatus status = 
controller.jobController().deploymentStatus(application); JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone())); Optional.ofNullable(status.jobSteps().get(jobId)) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision())); if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true)) response.setString("status", "pending"); else response.setString("status", "running"); }); } else { var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId())); deploymentRun.ifPresent(run -> { response.setString("status", run.hasEnded() ? "complete" : "running"); }); } } response.setDouble("quota", deployment.quota().rate()); deployment.cost().ifPresent(cost -> response.setDouble("cost", cost)); controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false) .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString())); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); 
metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(RotationState state, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", rotationStateString(state)); } private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) { var array = object.setArray("endpointStatus"); for (var rotation : rotations) { var statusObject = array.addObject(); var targets = status.of(rotation.rotationId()); statusObject.setString("endpointId", rotation.endpointId().id()); statusObject.setString("rotationId", rotation.rotationId().asString()); statusObject.setString("clusterId", rotation.clusterId().value()); statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment))); statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli()); } } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = requireZone(environment, region); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } DeploymentId deploymentId = new DeploymentId(instance.id(), zone); RoutingStatus.Agent agent = isOperator(request) ? 
RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant; RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out; controller.routing().of(deploymentId).setRoutingStatus(status, agent); return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ? "in" : "out of")); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId()) .requiresRotation() .primary(); if (primaryEndpoint.isPresent()) { DeploymentRoutingContext context = controller.routing().of(deploymentId); RoutingStatus status = context.routingStatus(); array.addString(primaryEndpoint.get().upstreamName(deploymentId)); Cursor statusObject = array.addObject(); statusObject.setString("status", status.value().name()); statusObject.setString("reason", ""); statusObject.setString("agent", status.agent().name()); statusObject.setLong("timestamp", status.changedAt().getEpochSecond()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().requireInstance(applicationId); ZoneId zone = requireZone(environment, region); RotationId rotation = findRotationId(instance, endpointId); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + 
" has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(instance.rotationStatus().of(rotation, deployment), response); return new SlimeJsonResponse(slime); }
// Returns the change currently being deployed for this instance: platform version, application revision,
// and whether the change is pinned. The response object is empty when no change is in progress.
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); Slime slime = new Slime(); Cursor root = slime.setObject(); if ( ! instance.change().isEmpty()) { instance.change().platform().ifPresent(version -> root.setString("platform", version.toString())); instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString())); root.setBool("pinned", instance.change().isPinned()); } return new SlimeJsonResponse(slime); }
// Reports whether the given deployment is suspended, as a single boolean field.
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); }
// Proxies a /status page request for a service node in the given deployment through the config server,
// forwarding the original request's query parameters.
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters())); }
// Lists the orchestrator-known service nodes for the given deployment (body continues past this chunk).
private HttpResponse orchestrator(String tenantName, 
String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getServiceNodes(deploymentId); } private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Query query = Query.empty().add(request.getJDiscRequest().parameters()); query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString()); return controller.serviceRegistry().configServer().getServiceNodePage( deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query); } private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri()); } private HttpResponse updateTenant(String tenantName, HttpRequest request) { getTenantOrThrow(tenantName); TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().update(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createTenant(String tenantName, HttpRequest 
request) { TenantName tenant = TenantName.from(tenantName); Inspector requestObject = toSlime(request.getData()).get(); controller.tenants().create(accessControlRequests.specification(tenant, requestObject), accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest())); if (controller.system().isPublic()) { User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); TenantInfo info = controller.tenants().require(tenant, CloudTenant.class) .info() .withContact(TenantContact.from(user.name(), user.email())); controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(info); controller.tenants().store(lockedTenant); }); } return tenant(controller.tenants().require(TenantName.from(tenantName)), request); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest()); Application application = controller.applications().createApplication(id, credentials); Slime slime = new Slime(); toSlime(id, slime.setObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); if (controller.applications().getApplication(applicationId).isEmpty()) createApplication(tenantName, applicationName, request); controller.applications().createInstance(applicationId.instance(instanceName)); Slime slime = new Slime(); toSlime(applicationId.instance(instanceName), slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the given Vespa version if a valid 
one is given, e.g., "7.8.9". */ private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) { String versionString = readToString(request.getData()); ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Version version = Version.fromString(versionString); VersionStatus versionStatus = controller.readVersionStatus(); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(versionStatus); if (!versionStatus.isActive(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + versionStatus.versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPin(); controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request)); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. */ private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Inspector buildField = toSlime(request.getData()).get().field("build"); long build = buildField.valid() ? buildField.asLong() : -1; StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { RevisionId revision = build == -1 ? 
application.get().revisions().last().get().id() : getRevision(application.get(), build); Change change = Change.of(revision); controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request)); response.append("Triggered ").append(change).append(" for ").append(id); }); return new MessageResponse(response.toString()); } private RevisionId getRevision(Application application, long build) { return application.revisions().withPackage().stream() .map(ApplicationVersion::id) .filter(version -> version.number() == build) .findFirst() .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build)) .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found")); } private HttpResponse cancelBuild(String tenantName, String applicationName, String build){ TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName); RevisionId revision = RevisionId.forProduction(Long.parseLong(build)); controller.applications().lockApplicationOrThrow(id, application -> { controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped()))); for (Instance instance : application.get().instances().values()) if (instance.change().revision().equals(Optional.of(revision))) controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION); }); return new MessageResponse("Marked build '" + build + "' as non-deployable"); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); StringBuilder response = new StringBuilder(); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { Change 
        change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        // NOTE(review): toUpperCase() uses the default locale; "all"/"platform" etc. are ASCII so this
        // is safe in practice, but Locale.ROOT would be the defensive choice — confirm before changing.
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '")
                .append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // Optional comma-separated filters; blank entries are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .toList();
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .toList();
    // null speed means "use the default speed" downstream.
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
                               (indexedOnly ? ", for indexed types" : "") +
                               (speed != null ? ", with speed " + speed : ""));
}

/** Gets reindexing status of an application in a zone.
 */
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    // Sorted by cluster name for a stable response.
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}

/** Writes the timestamps and state of one reindexing status entry into the given cursor; absent fields are omitted. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress
                                -> statusObject.setDouble("progress", progress));
    status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
}

/** Maps a reindexing state to its lower-case wire name. */
private static String toString(ApplicationReindexing.State state) {
    return switch (state) {
        case PENDING: yield "pending";
        case RUNNING: yield "running";
        case FAILED: yield "failed";
        case SUCCESSFUL: yield "successful";
    };
}

/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}

/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}

/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    // All filter properties are optional; an empty filter restarts the whole deployment.
    RestartFilter restartFilter = new RestartFilter()
            .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
            .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
            .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
    controller.applications().restart(deploymentId, restartFilter);
    return new MessageResponse("Requested restart of " + deploymentId);
}

/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
}

/** Deploys an application package directly to the given job's zone; restricted to manual environments unless the caller is an operator. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("applicationZip"))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // "deployOptions" is an optional JSON form part carrying vespaVersion and dryRun.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);
    ensureApplicationExists(TenantAndApplicationId.from(id), request);
    boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions"))
                             .map(json -> SlimeUtils.jsonToSlime(json).get())
                             .flatMap(options -> optional("dryRun", options))
                             .map(Boolean::valueOf)
                             .orElse(false);
    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}

/** Deploys a system application package to the given zone; regular application deployment is rejected here. */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( !
         dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    // Only system applications may be deployed through this endpoint.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }
    // System applications always deploy at the current system version; an explicit version is rejected.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }
    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    ActivateResult result = controller.applications()
                                      .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}

/** Deletes a tenant; "forget" (hard delete) is operator-only. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && !isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");
    controller.tenants().delete(TenantName.from(tenantName),
                                Optional.of(accessControlRequests.credentials(TenantName.from(tenantName),
                                                                              toSlime(request.getData()).get(),
                                                                              request.getJDiscRequest())),
                                forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}

/** Deletes an application and all its instances. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id
            = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}

/** Deletes an instance; the enclosing application is deleted too when this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}

/** Deactivates a deployment and aborts any still-running deployment job for it. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " +
                                                                             request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}

/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec.
 */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // Fall back to the default instance's production deployments when the given instance is not in the spec.
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                   ? id : TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(prodInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(toTest, type.zone()));
    Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
    if (deployment == null)
        throw new NotExistsException(toTest + " is not deployed in " + type.zone());
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  deployment.version(),
                                                                  deployment.revision(),
                                                                  deployment.at(),
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}

/** Requests a service dump on the given node by writing a "serviceDump" report; optionally waits for the result. */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
    if (report != null) {
        Cursor cursor = report.get();
        // An existing report with neither failedAt nor completedAt set means a dump is still in progress.
        boolean force = request.getBooleanProperty("force");
        if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
            throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
        }
    }
    Slime requestPayload;
    try {
        requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
    } catch (Exception e) {
        throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
    }
    Cursor requestPayloadCursor = requestPayload.get();
    String configId = requestPayloadCursor.field("configId").asString();
    long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
    if (configId.isEmpty()) {
        throw new IllegalArgumentException("Missing configId");
    }
    Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
    int artifactEntries = artifactsCursor.entries();
    if (artifactEntries == 0) {
        throw new IllegalArgumentException("Missing or empty 'artifacts'");
    }
    Slime dumpRequest = new Slime();
    Cursor dumpRequestCursor = dumpRequest.setObject();
    dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
    dumpRequestCursor.setString("configId", configId);
    Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
    for (int i = 0; i < artifactEntries; i++) {
        dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
    }
    if (expiresAt > 0) {
        dumpRequestCursor.setLong("expiresAt", expiresAt);
    }
    Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
    if (dumpOptionsCursor.children() > 0) {
        SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
    }
    // NOTE(review): new String(bytes) uses the platform default charset; toJsonBytes presumably emits
    // UTF-8 — consider new String(bytes, UTF_8). Confirm before changing.
    var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
    nodeRepository.updateReports(zone, hostname, reportsUpdate);
    boolean wait = request.getBooleanProperty("wait");
    if (!wait) return new MessageResponse("Request created");
    return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}

/** Returns the stored service dump report for a node, or 404 when none exists. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId
       zone = requireZone(environment, region);
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
            .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
    return new SlimeJsonResponse(report);
}

/** Polls the node's service dump report every few seconds until it is marked completed or failed. */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) {
    int pollInterval = 2;
    Slime report;
    while (true) {
        // NOTE(review): unchecked Optional.get() — throws NoSuchElementException if the report vanishes
        // between polls; callers reach this right after writing the report, so presumably safe. Verify.
        report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
        Cursor cursor = report.get();
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
            break;
        }
        final Slime copyForLambda = report;
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
    return new SlimeJsonResponse(report);
}

/** Reads the "serviceDump" report of the given node, verifying that the node is owned by the given application. */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        throw new NotExistsException(hostname);
    }
    ApplicationId app = ApplicationId.from(tenant, application, instance);
    ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if (!app.equals(owner)) {
        throw new IllegalArgumentException("Node is not owned by " + app.toFullString());
    }
    String json = node.reports().get("serviceDump");
    if (json == null) return Optional.empty();
    return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
}

/** Parses a source revision from JSON; all three of repository, branch, and commit are required. */
private static SourceRevision toSourceRevision(Inspector object) {
    if (!object.field("repository").valid() ||
        !object.field("branch").valid() ||
        !object.field("commit").valid()) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(object.field("repository").asString(),
                              object.field("branch").asString(),
                              object.field("commit").asString());
}

private Tenant getTenantOrThrow(String tenantName) {
    return controller.tenants().get(tenantName)
            .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}

/** Serializes a tenant, including per-type details and its applications, into the given cursor. */
private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
            break;
        case cloud: {
            CloudTenant cloudTenant = (CloudTenant) tenant;
            cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName()));
            Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys");
            cloudTenant.developerKeys().forEach((key, user) -> {
                Cursor keyObject = pemDeveloperKeysArray.addObject();
                keyObject.setString("key", KeyUtils.toPem(key));
                keyObject.setString("user", user.getName());
            });
            toSlime(object, cloudTenant.tenantSecretStores());
            toSlime(object.setObject("integrations").setObject("aws"),
                    controller.serviceRegistry().roleService().getTenantRole(tenant.name()),
                    cloudTenant.tenantSecretStores());
            try {
                var usedQuota = applications.stream()
                                            .map(Application::quotaUsage)
                                            .reduce(QuotaUsage.none, QuotaUsage::add);
                toSlime(object.setObject("quota"), usedQuota);
            } catch (Exception e) {
                // Quota is best-effort presentation data; log and continue rather than failing the response.
                log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e)));
            }
            cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role));
            toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess"));
            break;
        }
        case deleted:
            break;
        default:
            throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    Cursor applicationArray = object.setArray("applications");
    for (Application application : applications) {
        DeploymentStatus status = null;
        Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values()
                                                                              : application.instances().values();
        if (instances.isEmpty() && !showOnlyActiveInstances(request))
            toSlime(application.id(), applicationArray.addObject(), request);
        for (Instance instance : instances) {
            if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
                continue;
            if (recurseOverApplications(request)) {
                // Deployment status is expensive; compute it lazily, once per application.
                if (status == null) status = controller.jobController().deploymentStatus(application);
                toSlime(applicationArray.addObject(), instance, status, request);
            } else {
                toSlime(instance.id(), applicationArray.addObject(), request);
            }
        }
    }
    tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}

private void toSlime(ArchiveAccess archiveAccess, Cursor object) {
    archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role));
    archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member));
}

private void toSlime(Cursor object, QuotaUsage usage) {
    object.setDouble("budgetUsed", usage.rate());
}

/** Serializes cluster resources, including the computed cost. */
private void toSlime(ClusterResources resources, Cursor object) {
    object.setLong("nodes", resources.nodes());
    object.setLong("groups", resources.groups());
    toSlime(resources.nodeResources(), object.setObject("nodeResources"));
    double cost = ResourceMeterMaintainer.cost(resources, controller.serviceRegistry().zoneRegistry().system());
    object.setDouble("cost", cost);
}

private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) {
    utilizationObject.setDouble("cpu", utilization.cpu());
    utilizationObject.setDouble("idealCpu", utilization.idealCpu());
    utilizationObject.setDouble("currentCpu", utilization.currentCpu());
    utilizationObject.setDouble("peakCpu", utilization.peakCpu());
    utilizationObject.setDouble("memory", utilization.memory());
    utilizationObject.setDouble("idealMemory", utilization.idealMemory());
    utilizationObject.setDouble("currentMemory", utilization.currentMemory());
    utilizationObject.setDouble("peakMemory", utilization.peakMemory());
    utilizationObject.setDouble("disk", utilization.disk());
    utilizationObject.setDouble("idealDisk", utilization.idealDisk());
    utilizationObject.setDouble("currentDisk", utilization.currentDisk());
    utilizationObject.setDouble("peakDisk", utilization.peakDisk());
}

private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) {
    for (Cluster.ScalingEvent scalingEvent : scalingEvents) {
        Cursor scalingEventObject = scalingEventsArray.addObject();
        toSlime(scalingEvent.from(), scalingEventObject.setObject("from"));
        toSlime(scalingEvent.to(), scalingEventObject.setObject("to"));
        scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli());
        scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli()));
    }
}

private void toSlime(NodeResources resources, Cursor object) {
    object.setDouble("vcpu", resources.vcpu());
    object.setDouble("memoryGb", resources.memoryGb());
    object.setDouble("diskGb", resources.diskGb());
    object.setDouble("bandwidthGbps", resources.bandwidthGbps());
    object.setString("diskSpeed", valueOf(resources.diskSpeed()));
    object.setString("storageType", valueOf(resources.storageType()));
}

/** Serializes a tenant list entry: name, type metadata, and a self URL. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData = object.setObject("metaData");
    metaData.setString("type", tenantType(tenant));
    switch (tenant.type()) {
        case athenz:
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
            break;
        case cloud: break;
        case deleted: break;
        default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
    }
    object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
}

/** Serializes tenant metadata: creation/deletion times, last dev deployment, last submission, and last logins. */
private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) {
    // Last dev activity: prefer recorded deployment starts; fall back to the latest dev job run.
    Optional<Instant> lastDev = applications.stream()
            .flatMap(application -> application.instances().values().stream())
            .flatMap(instance -> instance.deployments().values().stream()
                    .filter(deployment -> deployment.zone().environment() == Environment.dev)
                    .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment)))
            .max(Comparator.naturalOrder())
            .or(() -> applications.stream()
                    .flatMap(application -> application.instances().values().stream())
                    .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream()
                            .filter(job -> job.environment() == Environment.dev)
                            .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream()))
                    .map(Run::start)
                    .max(Comparator.naturalOrder()));
    Optional<Instant> lastSubmission = applications.stream()
            .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream())
            .max(Comparator.naturalOrder());
    object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli());
    if (tenant.type() == Tenant.Type.deleted)
        object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli());
    lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli()));
    lastSubmission.ifPresent(instant ->
object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal 
requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } 
Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); 
tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } 
/** Resolves the {jobtype} path segment to a JobType, using the zone registry for validation. */
    private JobType jobTypeFromPath(Path path) {
        return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry());
    }

    /** Builds a RunId from the {tenant}/{application}/{instance}/{jobtype}/{number} path segments. */
    private RunId runIdFromPath(Path path) {
        long number = Long.parseLong(path.get("number"));
        return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
    }

    /**
     * Handles an application submission: parses the multipart payload (options JSON,
     * application zip, test zip), validates it, and registers the submission.
     */
    private HttpResponse submit(String tenant, String application, HttpRequest request) {
        Map<String, byte[]> dataParts = parseDataParts(request);
        Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
        long projectId = submitOptions.field("projectId").asLong();
        projectId = projectId == 0 ? 1 : projectId; // missing/zero project id defaults to 1
        Optional<String> repository = optional("repository", submitOptions);
        Optional<String> branch = optional("branch", submitOptions);
        Optional<String> commit = optional("commit", submitOptions);
        // A source revision is only recorded when all three of repository, branch and commit are given.
        Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent()
                ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get()))
                : Optional.empty();
        Optional<String> sourceUrl = optional("sourceUrl", submitOptions);
        Optional<String> authorEmail = optional("authorEmail", submitOptions);
        Optional<String> description = optional("description", submitOptions);
        int risk = (int) submitOptions.field("risk").asLong();
        // Reject relative or scheme-less source URLs early.
        sourceUrl.map(URI::create).ifPresent(url -> {
            if (url.getHost() == null || url.getScheme() == null)
                throw new IllegalArgumentException("Source URL must include scheme and host");
        });
        ApplicationPackage applicationPackage =
                new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true);
        byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP);
        Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision,
                                               authorEmail, description, risk);
        controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                         Optional.empty(),
                                                                         Optional.empty(),
                                                                         applicationPackage,
                                                                         Optional.of(requireUserPrincipal(request)));
        TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application);
        ensureApplicationExists(id, request);
        return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId);
    }

    /** Submits a deployment-removal package (empty test package, project id 0) for the application. */
    private HttpResponse removeAllProdDeployments(String tenant, String application) {
        JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                     TenantAndApplicationId.from(tenant, application),
                                                     new Submission(ApplicationPackage.deploymentRemoval(), new byte[0],
                                                                    Optional.empty(), Optional.empty(), Optional.empty(),
                                                                    Optional.empty(), 0),
                                                     0);
        return new MessageResponse("All deployments removed");
    }

    private ZoneId requireZone(String environment, String region) {
        return requireZone(ZoneId.from(environment, region));
    }

    /** Returns the zone if known to the registry; the pseudo zone prod.controller is always accepted. */
    private ZoneId requireZone(ZoneId zone) {
        // The "controller" prod region is a synthetic zone not present in the registry.
        if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) {
            return zone;
        }
        if (!controller.zoneRegistry().hasZone(zone)) {
            throw new IllegalArgumentException("Zone " + zone + " does not exist in this system");
        }
        return zone;
    }

    /**
     * Parses the request's multipart body. When an X-Content-Hash header is present, the body is
     * streamed through a SHA-256 digest and the result must match the (base64) header value.
     * NOTE(review): Arrays.equals is not constant-time; if this comparison ever guards
     * authentication rather than integrity, MessageDigest.isEqual would be preferable.
     */
    private static Map<String, byte[]> parseDataParts(HttpRequest request) {
        String contentHash = request.getHeader("X-Content-Hash");
        if (contentHash == null)
            return new MultipartParser().parse(request);

        DigestInputStream digester = Signatures.sha256Digester(request.getData());
        var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri());
        if ( ! Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash)))
            throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash");

        return dataParts;
    }

    /**
     * Finds the rotation to act on: by endpoint id when given, otherwise the single assigned
     * rotation. Fails when the instance has no rotations, or has several and none was named.
     */
    private static RotationId findRotationId(Instance instance, Optional<String> endpointId) {
        if (instance.rotations().isEmpty()) {
            throw new NotExistsException("global rotation does not exist for " + instance);
        }
        if (endpointId.isPresent()) {
            return instance.rotations().stream()
                           .filter(r -> r.endpointId().id().equals(endpointId.get()))
                           .map(AssignedRotation::rotationId)
                           .findFirst()
                           .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() +
                                                                     " does not exist for " + instance));
        } else if (instance.rotations().size() > 1) {
            throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given");
        }
        return instance.rotations().get(0).rotationId();
    }

    /** Wire-format name for a rotation state. */
    private static String rotationStateString(RotationState state) {
        return switch (state) {
            case in: yield "IN";
            case out: yield "OUT";
            case unknown: yield "UNKNOWN";
        };
    }

    /** Wire-format name for an endpoint scope. */
    private static String endpointScopeString(Endpoint.Scope scope) {
        return switch (scope) {
            case weighted: yield "weighted";
            case application: yield "application";
            case global: yield "global";
            case zone: yield "zone";
        };
    }

    /** Wire-format name for a routing method. */
    private static String routingMethodString(RoutingMethod method) {
        return switch (method) {
            case exclusive: yield "exclusive";
            case sharedLayer4: yield "sharedLayer4";
        };
    }

    /** Returns the JDisc request-context attribute cast to the given type, or fails if absent/mistyped. */
    private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) {
        return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
                       .filter(cls::isInstance)
                       .map(cls::cast)
                       .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
    }

    /** Returns whether given request is by an operator */
    private static boolean isOperator(HttpRequest request) {
        var securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
        return securityContext.roles().stream()
                              .map(Role::definition)
                              .anyMatch(definition -> definition == RoleDefinition.hostedOperator);
    }

    /**
     * Returns the deployments ordered by their zone's position in the instance's deployment spec.
     * Deployments in zones not listed in the spec sort first (indexOf yields -1).
     */
    private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) {
        List<ZoneId> productionZones = spec.zones().stream()
                                           .filter(z -> z.region().isPresent())
                                           .map(z -> ZoneId.from(z.environment(), z.region().get()))
                                           .toList();
        return deployments.stream()
                          .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone())))
                          .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
    }

}
/**
 * Audit-logged HTTP handler for the /application/v4 REST API: tenant, application, instance,
 * job and deployment resources. Dispatches per HTTP method to the handle* routing methods below.
 */
class ApplicationApiHandler extends AuditLoggingRequestHandler {

    // NOTE(review): not referenced in this part of the file — presumably used by methods elsewhere in the class.
    private static final ObjectMapper jsonMapper = new ObjectMapper();

    private final Controller controller;
    private final AccessControlRequests accessControlRequests;
    private final TestConfigSerializer testConfigSerializer;

    @Inject
    public ApplicationApiHandler(ThreadedHttpRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AccessControlRequests accessControlRequests) {
        super(parentCtx, controller.auditLogger());
        this.controller = controller;
        this.accessControlRequests = accessControlRequests;
        this.testConfigSerializer = new TestConfigSerializer(controller.system());
    }

    /** Generous timeout: some operations (e.g. deployments) are slow. */
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }

    /**
     * Entry point: routes by HTTP method and translates domain exceptions into
     * the corresponding HTTP error responses.
     */
    @Override
    public HttpResponse auditAndHandle(HttpRequest request) {
        try {
            Path path = new Path(request.getUri());
            return switch (request.getMethod()) {
                case GET: yield handleGET(path, request);
                case PUT: yield handlePUT(path, request);
                case POST: yield handlePOST(path, request);
                case PATCH: yield handlePATCH(path, request);
                case DELETE: yield handleDELETE(path, request);
                case OPTIONS: yield handleOPTIONS();
                default: yield ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            };
        }
        catch (RestApiException.Forbidden e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (RestApiException.Unauthorized e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            // Map config server error codes onto HTTP statuses; unknown codes become 400.
            return switch (e.code()) {
                case NOT_FOUND: yield ErrorResponse.notFoundError(Exceptions.toMessageString(e));
                case ACTIVATION_CONFLICT: yield new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
                case INTERNAL_SERVER_ERROR: yield ErrorResponses.logThrowing(request, log, e);
                default: yield new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
            };
        }
        catch (RuntimeException e) {
            return ErrorResponses.logThrowing(request, log, e);
        }
    }

    /** GET routing table; first matching path wins, so order is significant. */
    private HttpResponse handleGET(Path path, HttpRequest request) {
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/notifications")) return notifications(request, Optional.ofNullable(request.getProperty("tenant")), true);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return accessRequests(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), this::tenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), this::tenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), this::tenantInfoContacts);
        if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(request, Optional.of(path.get("tenant")), false);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"),
request.getProperty("allowMajor"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
        // ".../deploying" and ".../deploying/pin" are served by the same handler.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance")) return applications(path.get("tenant"), Optional.of(path.get("application")), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return instance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
        // Job (deployment run) resources.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.applications().requireApplication(TenantAndApplicationId.from(path.get("tenant"), path.get("application"))), controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}/logs")) return JobControllerApiHandlerHelper.vespaLogsResponse(controller.jobController(), runIdFromPath(path), asLong(request.getProperty("from"), 0), request.getBooleanProperty("tester"));
        // Per-deployment (zone) resources, instance-first path shape.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return getReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/service/{service}/{host}/state/v1/{*}")) return stateV1(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/orchestrator")) return orchestrator(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/content/{*}")) return content(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return supportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return getServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/scaling")) return scaling(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Legacy path shape with instance after region.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/metrics")) return metrics(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // NOTE(review): this check is an exact duplicate of the one above and can never match — dead code?
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{host}/status/{*}")) return status(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.get("host"), path.getRest(), request);
        if
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/nodes")) return nodes(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/clusters")) return clusters(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), Optional.ofNullable(request.getProperty("endpointId")));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** PUT routing table; first matching path wins, so order is significant. */
    private HttpResponse handlePUT(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/request/operator")) return requestSshAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/approve/operator")) return approveAccessRequest(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return addManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/info")) return updateTenantInfo(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/info/profile")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoProfile);
        if (path.matches("/application/v4/tenant/{tenant}/info/billing")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoBilling);
        if (path.matches("/application/v4/tenant/{tenant}/info/contacts")) return withCloudTenant(path.get("tenant"), request, this::putTenantInfoContacts);
        // Bare "archive-access" is the legacy alias of "archive-access/aws".
        if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return allowAwsArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return allowGcpArchiveAccess(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return addSecretStore(path.get("tenant"), path.get("name"), request);
        // PUT clears the rotation override (inGlobalRotation=false); DELETE sets it — see handleDELETE.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** POST routing table; first matching path wins, so order is significant. */
    private HttpResponse handlePOST(Path path, HttpRequest request) {
        if
(path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        // Application-level deployment orchestration for the "default" instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), "default", false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), "default", true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), "default", request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return addDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return createInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploy/{jobtype}")) return jobDeploy(appIdFromPath(path), jobTypeFromPath(path), request);
        // Same orchestration endpoints, scoped to a named instance.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), false, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), path.get("instance"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        // Per-deployment (zone) operations, instance-first path shape.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindex")) return reindex(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return enableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return allowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/node/{node}/service-dump")) return requestServiceDump(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("node"), request);
        // Legacy path shape with instance after region.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"),
path.get("environment"), path.get("region"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** PATCH routing table; both path shapes patch the application (instance segment is ignored). */
    private HttpResponse handlePATCH(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return patchApplication(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** DELETE routing table; first matching path wins, so order is significant. */
    private HttpResponse handleDELETE(Path path, HttpRequest request) {
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/access/managed/operator")) return removeManagedAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/key")) return removeDeveloperKey(path.get("tenant"), request);
        // Bare "archive-access" is the legacy alias of "archive-access/aws".
        if (path.matches("/application/v4/tenant/{tenant}/archive-access")) return removeAwsArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
        if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), "default", path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/key")) return removeDeployKey(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit/{build}")) return cancelBuild(path.get("tenant"), path.get("application"), path.get("build"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}")) return deleteInstance(path.get("tenant"), path.get("application"), path.get("instance"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), request, appIdFromPath(path), jobTypeFromPath(path));
        // DELETE on ".../pause" resumes the paused job.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return resume(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/reindexing")) return disableReindexing(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/suspend")) return suspend(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false);
        // DELETE sets the rotation override (inGlobalRotation=true); PUT clears it — see handlePUT.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}/access/support")) return disallowSupportAccess(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // Legacy path shape with instance after region.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Answers CORS-style preflight/OPTIONS requests with the allowed methods. (Continues below.) */
    private HttpResponse handleOPTIONS() {
        EmptyResponse response = new EmptyResponse();
        response.headers().put("Allow",
"GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /** Lists every tenant with its applications — the recursive variant of the root resource. */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        List<Application> applications = controller.applications().asList();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            toSlime(tenantArray.addObject(), tenant, applications.stream().filter(app -> app.id().tenant().equals(tenant.name())).collect(toList()), request);
        return new SlimeJsonResponse(slime);
    }

    /** Root resource: recurses over tenants when requested, otherwise links to the tenant list. */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request) ? recursiveRoot(request) : new ResourceResponse(request, "tenant");
    }

    /** Lists all tenants, optionally including deleted ones. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList(includeDeleted(request)))
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Renders a single tenant, or 404 when it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName), includeDeleted(request))
                         .map(tenant -> tenant(tenant, request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Renders the given tenant together with its applications. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, controller.applications().asList(tenant.name()), request);
        return new SlimeJsonResponse(slime);
    }

    /** Renders the managed-access flag, pending access request, and audit log for a cloud tenant. */
    private HttpResponse accessRequests(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var accessControlService = controller.serviceRegistry().accessControlService();
        var accessRoleInformation = accessControlService.getAccessRoleInformation(tenant);
        var managedAccess = accessControlService.getManagedAccess(tenant);
        var slime = new Slime();
        var cursor = slime.setObject();
        cursor.setBool("managedAccess", managedAccess);
        accessRoleInformation.getPendingRequest()
                             .ifPresent(membershipRequest -> {
                                 var requestCursor = cursor.setObject("pendingRequest");
                                 requestCursor.setString("requestTime", membershipRequest.getCreationTime());
                                 requestCursor.setString("reason", membershipRequest.getReason());
                             });
        var auditLogCursor = cursor.setArray("auditLog");
        accessRoleInformation.getAuditLog()
                             .forEach(auditLogEntry -> {
                                 var entryCursor = auditLogCursor.addObject();
                                 entryCursor.setString("created", auditLogEntry.getCreationTime());
                                 entryCursor.setString("approver", auditLogEntry.getApprover());
                                 entryCursor.setString("reason", auditLogEntry.getReason());
                                 entryCursor.setString("status", auditLogEntry.getAction());
                             });
        return new SlimeJsonResponse(slime);
    }

    /** Files an ssh access request for a cloud tenant; operators only. */
    private HttpResponse requestSshAccess(String tenantName, HttpRequest request) {
        if (!isOperator(request)) {
            return ErrorResponse.forbidden("Only operators are allowed to request ssh access");
        }
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only request access for cloud tenants");
        controller.serviceRegistry().accessControlService().requestSshAccess(TenantName.from(tenantName));
        return new MessageResponse("OK");
    }

    /** Approves or rejects a pending ssh access request for a cloud tenant. */
    private HttpResponse approveAccessRequest(String tenantName, HttpRequest request) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only see access requests for cloud tenants");
        var inspector = toSlime(request.getData()).get();
        // Expiry defaults to one day from now when the request gives none
        var expiry = inspector.field("expiry").valid() ?
Instant.ofEpochMilli(inspector.field("expiry").asLong()) : Instant.now().plus(1, ChronoUnit.DAYS);
        var approve = inspector.field("approve").asBool();
        controller.serviceRegistry().accessControlService().decideSshAccess(tenant, expiry, OAuthCredentials.fromAuth0RequestContext(request.getJDiscRequest().context()), approve);
        return new MessageResponse("OK");
    }

    /** Enables managed access for the given tenant. */
    private HttpResponse addManagedAccess(String tenantName) {
        return setManagedAccess(tenantName, true);
    }

    /** Disables managed access for the given tenant. */
    private HttpResponse removeManagedAccess(String tenantName) {
        return setManagedAccess(tenantName, false);
    }

    /**
     * Sets the managed-access flag for a cloud tenant and echoes the stored value.
     * Returns 400 for non-cloud tenants.
     */
    private HttpResponse setManagedAccess(String tenantName, boolean managedAccess) {
        var tenant = TenantName.from(tenantName);
        if (controller.tenants().require(tenant).type() != Tenant.Type.cloud)
            return ErrorResponse.badRequest("Can only set access privileges for cloud tenants"); // fixed typo: was "access privel"
        controller.serviceRegistry().accessControlService().setManagedAccess(tenant, managedAccess);
        var slime = new Slime();
        slime.setObject().setBool("managedAccess", managedAccess);
        return new SlimeJsonResponse(slime);
    }

    /** Renders the tenant info of a cloud tenant, or 404 otherwise. */
    private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Applies the handler to the tenant when it is a cloud tenant, or 404 otherwise. */
    private HttpResponse withCloudTenant(String tenantName, Function<CloudTenant, SlimeJsonResponse> handler) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> handler.apply((CloudTenant) tenant))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Serializes the given tenant info; empty info yields an empty object. */
    private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
        Slime slime = new Slime();
        Cursor infoCursor = slime.setObject();
        if (!info.isEmpty()) {
infoCursor.setString("name", info.name());
            infoCursor.setString("email", info.email());
            infoCursor.setString("website", info.website());
            infoCursor.setString("contactName", info.contact().name());
            infoCursor.setString("contactEmail", info.contact().email());
            toSlime(info.address(), infoCursor);
            toSlime(info.billingContact(), infoCursor);
            toSlime(info.contacts(), infoCursor);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Renders the profile view (contact plus company details) of a cloud tenant's info. */
    private SlimeJsonResponse tenantInfoProfile(CloudTenant cloudTenant) {
        var slime = new Slime();
        var root = slime.setObject();
        var info = cloudTenant.info();
        if (!info.isEmpty()) {
            var contact = root.setObject("contact");
            contact.setString("name", info.contact().name());
            contact.setString("email", info.contact().email());
            var tenant = root.setObject("tenant");
            tenant.setString("company", info.name());
            tenant.setString("website", info.website());
            toSlime(info.address(), root);
        }
        return new SlimeJsonResponse(slime);
    }

    /** Applies the handler to the tenant and the parsed request body, or 404 when the tenant is missing. */
    // NOTE(review): unlike the Function overload, this does not filter on Tenant.Type.cloud before casting —
    // confirm all callers only reach this with cloud tenants
    private SlimeJsonResponse withCloudTenant(String tenantName, HttpRequest request, BiFunction<CloudTenant, Inspector, SlimeJsonResponse> handler) {
        return controller.tenants().get(tenantName)
                         .map(tenant -> handler.apply((CloudTenant) tenant, toSlime(request.getData()).get()))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Merges the profile fields from the request into the tenant's info, validates, and stores it. */
    private SlimeJsonResponse putTenantInfoProfile(CloudTenant cloudTenant, Inspector inspector) {
        var info = cloudTenant.info();
        var mergedContact = TenantContact.empty()
                .withName(getString(inspector.field("contact").field("name"), info.contact().name()))
                .withEmail(getString(inspector.field("contact").field("email"), info.contact().email()));
        var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.address());
        var mergedInfo = info
                .withName(getString(inspector.field("tenant").field("name"), info.name()))
                .withWebsite(getString(inspector.field("tenant").field("website"), info.website()))
                .withContact(mergedContact)
.withAddress(mergedAddress); validateMergedTenantInfo(mergedInfo); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoBilling(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); var info = cloudTenant.info(); if (!info.isEmpty()) { var billingContact = info.billingContact(); var contact = root.setObject("contact"); contact.setString("name", billingContact.contact().name()); contact.setString("email", billingContact.contact().email()); contact.setString("phone", billingContact.contact().phone()); toSlime(billingContact.address(), root); } return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoBilling(CloudTenant cloudTenant, Inspector inspector) { var info = cloudTenant.info(); var contact = info.billingContact().contact(); var address = info.billingContact().address(); var mergedContact = updateTenantInfoContact(inspector.field("contact"), contact); var mergedAddress = updateTenantInfoAddress(inspector.field("address"), info.billingContact().address()); var mergedBilling = info.billingContact() .withContact(mergedContact) .withAddress(mergedAddress); var mergedInfo = info.withBilling(mergedBilling); controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withInfo(mergedInfo); controller.tenants().store(lockedTenant); }); return new MessageResponse("Tenant info updated"); } private SlimeJsonResponse tenantInfoContacts(CloudTenant cloudTenant) { var slime = new Slime(); var root = slime.setObject(); toSlime(cloudTenant.info().contacts(), root); return new SlimeJsonResponse(slime); } private SlimeJsonResponse putTenantInfoContacts(CloudTenant cloudTenant, Inspector inspector) { var mergedInfo = cloudTenant.info() 
.withContacts(updateTenantInfoContacts(inspector.field("contacts"), cloudTenant.info().contacts()));
        controller.tenants().lockOrThrow(cloudTenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("Tenant info updated");
    }

    /** Validates required contact fields and the optional website URL of merged tenant info. */
    private void validateMergedTenantInfo(TenantInfo mergedInfo) {
        if (mergedInfo.contact().name().isBlank()) {
            throw new IllegalArgumentException("'contactName' cannot be empty");
        }
        if (mergedInfo.contact().email().isBlank()) {
            throw new IllegalArgumentException("'contactEmail' cannot be empty");
        }
        if (! mergedInfo.contact().email().contains("@")) {
            throw new IllegalArgumentException("'contactEmail' needs to be an email address");
        }
        if (! mergedInfo.website().isBlank()) {
            try {
                new URL(mergedInfo.website());
            } catch (MalformedURLException e) {
                throw new IllegalArgumentException("'website' needs to be a valid address");
            }
        }
    }

    /** Serializes a tenant address; emits nothing when the address is empty. */
    private void toSlime(TenantAddress address, Cursor parentCursor) {
        if (address.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("address");
        addressCursor.setString("addressLines", address.address());
        addressCursor.setString("postalCodeOrZip", address.code());
        addressCursor.setString("city", address.city());
        addressCursor.setString("stateRegionProvince", address.region());
        addressCursor.setString("country", address.country());
    }

    /** Serializes a billing contact; emits nothing when it is empty. */
    private void toSlime(TenantBilling billingContact, Cursor parentCursor) {
        if (billingContact.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("billingContact");
        addressCursor.setString("name", billingContact.contact().name());
        addressCursor.setString("email", billingContact.contact().email());
        addressCursor.setString("phone", billingContact.contact().phone());
        toSlime(billingContact.address(), addressCursor);
    }

    /** Serializes the contact list; only email contacts are implemented. */
    private void toSlime(TenantContacts contacts, Cursor parentCursor) {
        Cursor contactsCursor = parentCursor.setArray("contacts");
        contacts.all().forEach(contact -> {
            Cursor contactCursor = contactsCursor.addObject();
            Cursor audiencesArray = contactCursor.setArray("audiences");
            contact.audiences().forEach(audience -> audiencesArray.addString(toAudience(audience)));
            switch (contact.type()) {
                case EMAIL:
                    var email = (TenantContacts.EmailContact) contact;
                    contactCursor.setString("email", email.email());
                    return;
                default:
                    throw new IllegalArgumentException("Serialization for contact type not implemented: " + contact.type());
            }
        });
    }

    /** Parses a contact audience from its wire name. */
    private static TenantContacts.Audience fromAudience(String value) {
        return switch (value) {
            case "tenant": yield TenantContacts.Audience.TENANT;
            case "notifications": yield TenantContacts.Audience.NOTIFICATIONS;
            default: throw new IllegalArgumentException("Unknown contact audience '" + value + "'.");
        };
    }

    /** Maps a contact audience to its wire name. */
    private static String toAudience(TenantContacts.Audience audience) {
        return switch (audience) {
            case TENANT: yield "tenant";
            case NOTIFICATIONS: yield "notifications";
        };
    }

    /** Updates the full tenant info of a cloud tenant, or 404 otherwise. */
    private HttpResponse updateTenantInfo(String tenantName, HttpRequest request) {
        return controller.tenants().get(TenantName.from(tenantName))
                         .filter(tenant -> tenant.type() == Tenant.Type.cloud)
                         .map(tenant -> updateTenantInfo(((CloudTenant)tenant), request))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this"));
    }

    /** Returns the field's trimmed string value, or the default when the field is missing. */
    private String getString(Inspector field, String defaultVale) {
        return field.valid() ?
field.asString().trim() : defaultVale;
    }

    /** Merges the request body into the tenant's full info, validates it, and stores it. */
    private SlimeJsonResponse updateTenantInfo(CloudTenant tenant, HttpRequest request) {
        TenantInfo oldInfo = tenant.info();
        Inspector insp = toSlime(request.getData()).get();
        TenantContact mergedContact = TenantContact.empty()
                .withName(getString(insp.field("contactName"), oldInfo.contact().name()))
                .withEmail(getString(insp.field("contactEmail"), oldInfo.contact().email()));
        TenantInfo mergedInfo = TenantInfo.empty()
                .withName(getString(insp.field("name"), oldInfo.name()))
                .withEmail(getString(insp.field("email"), oldInfo.email()))
                .withWebsite(getString(insp.field("website"), oldInfo.website()))
                .withContact(mergedContact)
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldInfo.address()))
                .withBilling(updateTenantInfoBillingContact(insp.field("billingContact"), oldInfo.billingContact()))
                .withContacts(updateTenantInfoContacts(insp.field("contacts"), oldInfo.contacts()));
        validateMergedTenantInfo(mergedInfo);
        controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(mergedInfo);
            controller.tenants().store(lockedTenant);
        });
        return new MessageResponse("Tenant info updated");
    }

    /** Merges an address from the request; the result must be fully blank or fully set. */
    private TenantAddress updateTenantInfoAddress(Inspector insp, TenantAddress oldAddress) {
        if (!insp.valid()) return oldAddress;
        TenantAddress address = TenantAddress.empty()
                .withCountry(getString(insp.field("country"), oldAddress.country()))
                .withRegion(getString(insp.field("stateRegionProvince"), oldAddress.region()))
                .withCity(getString(insp.field("city"), oldAddress.city()))
                .withCode(getString(insp.field("postalCodeOrZip"), oldAddress.code()))
                .withAddress(getString(insp.field("addressLines"), oldAddress.address()));
        List<String> fields = List.of(address.address(), address.code(), address.country(), address.city(), address.region());
        // Accept an all-blank address or one with every field present — nothing in between
        if (fields.stream().allMatch(String::isBlank) || fields.stream().noneMatch(String::isBlank))
            return address;
        throw new IllegalArgumentException("All address fields must be set");
    }

    /** Merges a contact from the request; a non-blank email must contain '@'. */
    private TenantContact updateTenantInfoContact(Inspector insp, TenantContact oldContact) {
        if (!insp.valid()) return oldContact;
        String email = getString(insp.field("email"), oldContact.email());
        if (!email.isBlank() && !email.contains("@")) {
            throw new IllegalArgumentException("'email' needs to be an email address");
        }
        return TenantContact.empty()
                .withName(getString(insp.field("name"), oldContact.name()))
                .withEmail(getString(insp.field("email"), oldContact.email()))
                .withPhone(getString(insp.field("phone"), oldContact.phone()));
    }

    /** Merges the billing contact (contact plus address) from the request. */
    private TenantBilling updateTenantInfoBillingContact(Inspector insp, TenantBilling oldContact) {
        if (!insp.valid()) return oldContact;
        return TenantBilling.empty()
                .withContact(updateTenantInfoContact(insp, oldContact.contact()))
                .withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
    }

    /** Replaces the contact list with the request's entries; every email must contain '@'. */
    private TenantContacts updateTenantInfoContacts(Inspector insp, TenantContacts oldContacts) {
        if (!insp.valid()) return oldContacts;
        List<TenantContacts.EmailContact> contacts = SlimeUtils.entriesStream(insp).map(inspector -> {
            String email = inspector.field("email").asString().trim();
            List<TenantContacts.Audience> audiences = SlimeUtils.entriesStream(inspector.field("audiences"))
                    .map(audience -> fromAudience(audience.asString()))
                    .toList();
            if (!email.contains("@")) {
                throw new IllegalArgumentException("'email' needs to be an email address");
            }
            return new TenantContacts.EmailContact(audiences, email);
        }).toList();
        return new TenantContacts(contacts);
    }

    /** Lists notifications for one tenant (or all tenants), filtered by request properties. */
    private HttpResponse notifications(HttpRequest request, Optional<String> tenant, boolean includeTenantFieldInResponse) {
        boolean productionOnly = showOnlyProductionInstances(request);
        boolean excludeMessages = "true".equals(request.getProperty("excludeMessages"));
        Slime slime = new Slime();
        Cursor notificationsArray = slime.setObject().setArray("notifications");
        tenant.map(t -> Stream.of(TenantName.from(t)))
.orElseGet(() -> controller.notificationsDb().listTenantsWithNotifications().stream())
              .flatMap(tenantName -> controller.notificationsDb().listNotifications(NotificationSource.from(tenantName), productionOnly).stream())
              // Each filter passes when the request property is absent or matches the notification's source
              .filter(notification ->
                              propertyEquals(request, "application", ApplicationName::from, notification.source().application()) &&
                              propertyEquals(request, "instance", InstanceName::from, notification.source().instance()) &&
                              propertyEquals(request, "zone", ZoneId::from, notification.source().zoneId()) &&
                              propertyEquals(request, "job", job -> JobType.fromJobName(job, controller.zoneRegistry()), notification.source().jobType()) &&
                              propertyEquals(request, "type", Notification.Type::valueOf, Optional.of(notification.type())) &&
                              propertyEquals(request, "level", Notification.Level::valueOf, Optional.of(notification.level())))
              .forEach(notification -> toSlime(notificationsArray.addObject(), notification, includeTenantFieldInResponse, excludeMessages));
        return new SlimeJsonResponse(slime);
    }

    /** True when the request property is absent, or maps to a value equal to the given one. */
    private static <T> boolean propertyEquals(HttpRequest request, String property, Function<String, T> mapper, Optional<T> value) {
        return Optional.ofNullable(request.getProperty(property))
                       .map(propertyValue -> value.isPresent() && mapper.apply(propertyValue).equals(value.get()))
                       .orElse(true);
    }

    /** Serializes one notification; tenant field and messages are optional. */
    private static void toSlime(Cursor cursor, Notification notification, boolean includeTenantFieldInResponse, boolean excludeMessages) {
        cursor.setLong("at", notification.at().toEpochMilli());
        cursor.setString("level", notificationLevelAsString(notification.level()));
        cursor.setString("type", notificationTypeAsString(notification.type()));
        if (!excludeMessages) {
            Cursor messagesArray = cursor.setArray("messages");
            notification.messages().forEach(messagesArray::addString);
        }
        if (includeTenantFieldInResponse) cursor.setString("tenant", notification.source().tenant().value());
        notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
        notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
        notification.source().zoneId().ifPresent(zoneId -> {
            cursor.setString("environment", zoneId.environment().value());
            cursor.setString("region", zoneId.region().value());
        });
        notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
        notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
        notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
    }

    /** Maps a notification type to its wire name; submission and applicationPackage share one name. */
    private static String notificationTypeAsString(Notification.Type type) {
        return switch (type) {
            case submission, applicationPackage: yield "applicationPackage";
            case testPackage: yield "testPackage";
            case deployment: yield "deployment";
            case feedBlock: yield "feedBlock";
            case reindex: yield "reindex";
        };
    }

    /** Maps a notification level to its wire name. */
    private static String notificationLevelAsString(Notification.Level level) {
        return switch (level) {
            case info: yield "info";
            case warning: yield "warning";
            case error: yield "error";
        };
    }

    /** Lists a tenant's applications, or only the named one when given. */
    private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        getTenantOrThrow(tenantName); // fails fast when the tenant does not exist
        List<Application> applications = applicationName.isEmpty() ?
controller.applications().asList(tenant) :
                controller.applications().getApplication(TenantAndApplicationId.from(tenantName, applicationName.get()))
                          .map(List::of)
                          .orElseThrow(() -> new NotExistsException("Application '" + applicationName.get() + "' does not exist"));
        Slime slime = new Slime();
        Cursor applicationArray = slime.setArray();
        for (Application application : applications) {
            Cursor applicationObject = applicationArray.addObject();
            applicationObject.setString("tenant", application.id().tenant().value());
            applicationObject.setString("application", application.id().application().value());
            applicationObject.setString("url", withPath("/application/v4" +
                                                        "/tenant/" + application.id().tenant().value() +
                                                        "/application/" + application.id().application().value(),
                                                        request.getUri()).toString());
            Cursor instanceArray = applicationObject.setArray("instances");
            for (InstanceName instance : showOnlyProductionInstances(request) ? application.productionInstances().keySet()
                                                                              : application.instances().keySet()) {
                Cursor instanceObject = instanceArray.addObject();
                instanceObject.setString("instance", instance.value());
                instanceObject.setString("url", withPath("/application/v4" +
                                                         "/tenant/" + application.id().tenant().value() +
                                                         "/application/" + application.id().application().value() +
                                                         "/instance/" + instance.value(),
                                                         request.getUri()).toString());
            }
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the application package last targeted by the given dev job, as a zip. */
    private HttpResponse devApplicationPackage(ApplicationId id, JobType type) {
        ZoneId zone = type.zone();
        // NOTE(review): unchecked Optional.get() assumes a last run exists for this job — confirm callers guarantee this
        RevisionId revision = controller.jobController().last(id, type).get().versions().targetRevision();
        byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), revision);
        return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
    }

    /** Returns the dev application package diff for the given run, or 404. */
    private HttpResponse devApplicationPackageDiff(RunId runId) {
        DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone());
        return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number())
                .map(ByteArrayResponse::new)
                .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
    }

    /**
     * Returns an application package (or its tester package) as a zip.
     * The build is the 'build' property, "latestDeployed" for the newest production deployment,
     * or, when absent, the last submitted revision.
     */
    private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
        TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
        final long build;
        String requestedBuild = request.getProperty("build");
        if (requestedBuild != null) {
            if (requestedBuild.equals("latestDeployed")) {
                build = controller.applications().requireApplication(tenantAndApplication).latestDeployedRevision()
                                  .map(RevisionId::number)
                                  .orElseThrow(() -> new NotExistsException("no application package has been deployed in production for " + tenantAndApplication));
            } else {
                try {
                    build = Validation.requireAtLeast(Long.parseLong(request.getProperty("build")), "build number", 1L);
                } catch (NumberFormatException e) {
                    throw new IllegalArgumentException("invalid value for request parameter 'build'", e);
                }
            }
        } else {
            // No build given: default to the last submitted revision
            build = controller.applications().requireApplication(tenantAndApplication).revisions().last()
                              .map(version -> version.id().number())
                              .orElseThrow(() -> new NotExistsException("no application package has been submitted for " + tenantAndApplication));
        }
        RevisionId revision = RevisionId.forProduction(build);
        boolean tests = request.getBooleanProperty("tests");
        byte[] applicationPackage = tests ?
controller.applications().applicationStore().getTester(tenantAndApplication.tenant(), tenantAndApplication.application(), revision) :
                controller.applications().applicationStore().get(new DeploymentId(tenantAndApplication.defaultInstance(), ZoneId.defaultId()), revision);
        String filename = tenantAndApplication + (tests ? "-tests" : "-build") + revision.number() + ".zip";
        return new ZipResponse(filename, applicationPackage);
    }

    /** Returns the application package diff for the given build number, or 404. */
    private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
        TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
        return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number))
                .map(ByteArrayResponse::new)
                .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
    }

    /** Renders a single application. */
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the Vespa version to compile against, optionally pinned to a major version. */
    private HttpResponse compileVersion(String tenantName, String applicationName, String allowMajorParam) {
        Slime slime = new Slime();
        OptionalInt allowMajor = OptionalInt.empty();
        if (allowMajorParam != null) {
            try {
                allowMajor = OptionalInt.of(Integer.parseInt(allowMajorParam));
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("Invalid major version '" + allowMajorParam + "'", e);
            }
        }
        Version compileVersion = controller.applications().compileVersion(TenantAndApplicationId.from(tenantName, applicationName), allowMajor);
        slime.setObject().setString("compileVersion", compileVersion.toFullString());
        return new SlimeJsonResponse(slime);
    }

    /** Renders a single instance together with its application's deployment status. */
    private HttpResponse instance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(),
                getInstance(tenantName, applicationName, instanceName),
                controller.jobController().deploymentStatus(getApplication(tenantName, applicationName)),
                request);
        return new SlimeJsonResponse(slime);
    }

    /** Registers the request principal's public key as a developer key on a cloud tenant. */
    private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        Principal user = request.getJDiscRequest().getUserPrincipal();
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withDeveloperKey(developerKey, user);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /** Validates a configured secret store against a live deployment by reading a parameter through it. */
    private HttpResponse validateSecretStore(String tenantName, String secretStoreName, HttpRequest request) {
        var awsRegion = request.getProperty("aws-region");
        var parameterName = request.getProperty("parameter-name");
        var applicationId = ApplicationId.fromFullString(request.getProperty("application-id"));
        if (!applicationId.tenant().equals(TenantName.from(tenantName)))
            return ErrorResponse.badRequest("Invalid application id");
        var zoneId = requireZone(ZoneId.from(request.getProperty("zone")));
        var deploymentId = new DeploymentId(applicationId, zoneId);
        var tenant = controller.tenants().require(applicationId.tenant(), CloudTenant.class);
        var tenantSecretStore = tenant.tenantSecretStores()
                .stream()
                .filter(secretStore -> secretStore.getName().equals(secretStoreName))
                .findFirst();
        if (tenantSecretStore.isEmpty())
            return ErrorResponse.notFoundError("No secret store '" + secretStoreName + "' configured for tenant '" + tenantName + "'");
        var
response = controller.serviceRegistry().configServer().validateSecretStore(deploymentId, tenantSecretStore.get(), awsRegion, parameterName);
        try {
            var responseRoot = new Slime();
            var responseCursor = responseRoot.setObject();
            responseCursor.setString("target", deploymentId.toString());
            var responseResultCursor = responseCursor.setObject("result");
            // The config server's answer is JSON; embed it verbatim under "result"
            var responseSlime = SlimeUtils.jsonToSlime(response);
            SlimeUtils.copyObject(responseSlime.get(), responseResultCursor);
            return new SlimeJsonResponse(responseRoot);
        } catch (JsonParseException e) {
            return ErrorResponses.logThrowing(request, log, e);
        }
    }

    /** Removes the given developer key from a cloud tenant and returns the remaining keys. */
    private HttpResponse removeDeveloperKey(String tenantName, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        String pemDeveloperKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey developerKey = KeyUtils.fromPemEncodedPublicKey(pemDeveloperKey);
        // NOTE(review): 'user' is looked up but not passed to withoutDeveloperKey — confirm it is intentionally unused
        Principal user = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class).developerKeys().get(developerKey);
        Slime root = new Slime();
        controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, tenant -> {
            tenant = tenant.withoutDeveloperKey(developerKey);
            toSlime(root.setObject().setArray("keys"), tenant.get().developerKeys());
            controller.tenants().store(tenant);
        });
        return new SlimeJsonResponse(root);
    }

    /** Serializes a key-to-principal map as PEM key plus user name entries. */
    private void toSlime(Cursor keysArray, Map<PublicKey, ? extends Principal> keys) {
        keys.forEach((key, principal) -> {
            Cursor keyObject = keysArray.addObject();
            keyObject.setString("key", KeyUtils.toPem(key));
            keyObject.setString("user", principal.getName());
        });
    }

    /** Adds a deploy key to the application and returns the resulting key set. */
    private HttpResponse addDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    /** Removes a deploy key from the application and returns the resulting key set. */
    private HttpResponse removeDeployKey(String tenantName, String applicationName, HttpRequest request) {
        String pemDeployKey = toSlime(request.getData()).get().field("key").asString();
        PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey);
        Slime root = new Slime();
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> {
            application = application.withoutDeployKey(deployKey);
            application.get().deployKeys().stream()
                       .map(KeyUtils::toPem)
                       .forEach(root.setObject().setArray("keys")::addString);
            controller.applications().store(application);
        });
        return new SlimeJsonResponse(root);
    }

    /** Registers a tenant secret store (cloud tenants only) and returns the updated store list. */
    private HttpResponse addSecretStore(String tenantName, String name, HttpRequest request) {
        if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
            throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
        var data = toSlime(request.getData()).get();
        var awsId = mandatory("awsId", data).asString();
        var externalId = mandatory("externalId", data).asString();
        var role = mandatory("role",
// (continuation of addSecretStore from the previous line — validates the store, creates the
// tenant policy, registers the store with the tenant secret service, persists it under lock,
// then returns the tenant's full list of secret stores)
data).asString(); var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var tenantSecretStore = new TenantSecretStore(name, awsId, role); if (!tenantSecretStore.isValid()) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid"); } if (tenant.tenantSecretStores().contains(tenantSecretStore)) { return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured"); } controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role); controller.serviceRegistry().tenantSecretService().addSecretStore(tenant.name(), tenantSecretStore, externalId); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); }

// Deletes the named secret store: removes it from the tenant secret service and role policy,
// drops it from the tenant under lock, and returns the remaining secret stores.
// Returns 404 if no store with the given name exists.
private HttpResponse deleteSecretStore(String tenantName, String name, HttpRequest request) { var tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var optionalSecretStore = tenant.tenantSecretStores().stream() .filter(secretStore -> secretStore.getName().equals(name)) .findFirst(); if (optionalSecretStore.isEmpty()) return ErrorResponse.notFoundError("Could not delete secret store '" + name + "': Secret store not found"); var tenantSecretStore = optionalSecretStore.get(); controller.serviceRegistry().tenantSecretService().deleteSecretStore(tenant.name(), tenantSecretStore); controller.serviceRegistry().roleService().deleteTenantPolicy(tenant.name(), tenantSecretStore.getName(), tenantSecretStore.getRole()); controller.tenants().lockOrThrow(tenant.name(), LockedTenant.Cloud.class, lockedTenant -> { lockedTenant = lockedTenant.withoutSecretStore(tenantSecretStore); controller.tenants().store(lockedTenant); }); tenant = controller.tenants().require(TenantName.from(tenantName), CloudTenant.class); var slime = new Slime(); toSlime(slime.setObject(), tenant.tenantSecretStores()); return new SlimeJsonResponse(slime); }

// Sets the AWS archive-access role for a cloud tenant (blank roles rejected).
private HttpResponse allowAwsArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var role = mandatory("role", data).asString(); if (role.isBlank()) { return ErrorResponse.badRequest("AWS archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withAWSRole(role)); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role set to '" + role + "' for tenant " + tenantName + "."); }

// Clears the AWS archive-access role for a cloud tenant.
private HttpResponse removeAwsArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeAWSRole()); controller.tenants().store(lockedTenant); }); return new MessageResponse("AWS archive access role removed for tenant " + tenantName + "."); }

// Sets the GCP archive-access member for a cloud tenant. (Continues past this block boundary.)
private HttpResponse allowGcpArchiveAccess(String tenantName, HttpRequest request) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" +
// (continuation of allowGcpArchiveAccess from the previous line — reads the mandatory "member"
// field, rejects blank values, stores it on the tenant's archive access under lock)
tenantName + "' is not a cloud tenant"); var data = toSlime(request.getData()).get(); var member = mandatory("member", data).asString(); if (member.isBlank()) { return ErrorResponse.badRequest("GCP archive access role can't be whitespace only"); } controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.withGCPMember(member)); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member set to '" + member + "' for tenant " + tenantName + "."); }

// Clears the GCP archive-access member for a cloud tenant. Mirror of removeAwsArchiveAccess.
private HttpResponse removeGcpArchiveAccess(String tenantName) { if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud) throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant"); controller.tenants().lockOrThrow(TenantName.from(tenantName), LockedTenant.Cloud.class, lockedTenant -> { var access = lockedTenant.get().archiveAccess(); lockedTenant = lockedTenant.withArchiveAccess(access.removeGCPMember()); controller.tenants().store(lockedTenant); }); return new MessageResponse("GCP archive access member removed for tenant " + tenantName + "."); }

// PATCHes application-level settings: "majorVersion" (0 clears the pin) and "pemDeployKey"
// (adds a deploy key). Returns a human-readable summary of the changes applied.
private HttpResponse patchApplication(String tenantName, String applicationName, HttpRequest request) { Inspector requestObject = toSlime(request.getData()).get(); StringJoiner messageBuilder = new StringJoiner("\n").setEmptyValue("No applicable changes."); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(tenantName, applicationName), application -> { Inspector majorVersionField = requestObject.field("majorVersion"); if (majorVersionField.valid()) { Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int) majorVersionField.asLong(); application = application.withMajorVersion(majorVersion); messageBuilder.add("Set major version to " + (majorVersion == null ? "empty" : majorVersion)); } Inspector pemDeployKeyField = requestObject.field("pemDeployKey"); if (pemDeployKeyField.valid()) { String pemDeployKey = pemDeployKeyField.asString(); PublicKey deployKey = KeyUtils.fromPemEncodedPublicKey(pemDeployKey); application = application.withDeployKey(deployKey); messageBuilder.add("Added deploy key " + pemDeployKey); } controller.applications().store(application); }); return new MessageResponse(messageBuilder.toString()); }

// Looks up the application, throwing NotExistsException if absent.
private Application getApplication(String tenantName, String applicationName) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName); return controller.applications().getApplication(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); }

// Looks up the instance, throwing NotExistsException if absent.
private Instance getInstance(String tenantName, String applicationName, String instanceName) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); return controller.applications().getInstance(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); }

// Lists the node repository's nodes for this deployment as JSON. (Continues past this block boundary.)
private HttpResponse nodes(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all().applications(id)); Slime slime = new Slime(); Cursor nodesArray = slime.setObject().setArray("nodes"); for (Node node : nodes) { Cursor nodeObject = nodesArray.addObject(); nodeObject.setString("hostname", node.hostname().value()); nodeObject.setString("state", valueOf(node.state())); node.reservedTo().ifPresent(tenant -> nodeObject.setString("reservedTo", tenant.value())); nodeObject.setString("orchestration", valueOf(node.serviceState())); nodeObject.setString("version", node.currentVersion().toString());
// (continuation of nodes(...) from the previous line — serializes per-node flavor, resources,
// cluster membership, health/retirement flags and generation-based restart/reboot status)
node.flavor().ifPresent(flavor -> nodeObject.setString("flavor", flavor)); toSlime(node.resources(), nodeObject); nodeObject.setString("clusterId", node.clusterId()); nodeObject.setString("clusterType", valueOf(node.clusterType())); nodeObject.setBool("down", node.down()); nodeObject.setBool("retired", node.retired() || node.wantToRetire()); nodeObject.setBool("restarting", node.wantedRestartGeneration() > node.restartGeneration()); nodeObject.setBool("rebooting", node.wantedRebootGeneration() > node.rebootGeneration()); nodeObject.setString("group", node.group()); nodeObject.setLong("index", node.index()); } return new SlimeJsonResponse(slime); }

// Serializes per-cluster autoscaling state (min/max/current/target/suggested resources,
// utilization, scaling events and status) for a deployment. "target" is emitted only when it
// differs from current (ignoring non-numeric parts, per justNumbers()).
private HttpResponse clusters(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); com.yahoo.vespa.hosted.controller.api.integration.configserver.Application application = controller.serviceRegistry().configServer().nodeRepository().getApplication(zone, id); Slime slime = new Slime(); Cursor clustersObject = slime.setObject().setObject("clusters"); for (Cluster cluster : application.clusters().values()) { Cursor clusterObject = clustersObject.setObject(cluster.id().value()); clusterObject.setString("type", cluster.type().name()); toSlime(cluster.min(), clusterObject.setObject("min")); toSlime(cluster.max(), clusterObject.setObject("max")); toSlime(cluster.current(), clusterObject.setObject("current")); if (cluster.target().isPresent() && ! cluster.target().get().justNumbers().equals(cluster.current().justNumbers())) toSlime(cluster.target().get(), clusterObject.setObject("target")); cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested"))); utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization")); scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents")); clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode()); clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus()); clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis()); clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate()); clusterObject.setDouble("currentQueryFractionOfMax", cluster.currentQueryFractionOfMax()); } return new SlimeJsonResponse(slime); }

// Node.State -> wire string; throws on unrecognized states so new enum values fail loudly.
private static String valueOf(Node.State state) { return switch (state) { case failed: yield "failed"; case parked: yield "parked"; case dirty: yield "dirty"; case ready: yield "ready"; case active: yield "active"; case inactive: yield "inactive"; case reserved: yield "reserved"; case provisioned: yield "provisioned"; case breakfixed: yield "breakfixed"; case deprovisioned: yield "deprovisioned"; default: throw new IllegalArgumentException("Unexpected node state '" + state + "'."); }; }

// Node.ServiceState -> wire string; unknown (and any unmatched value) maps to "unknown".
static String valueOf(Node.ServiceState state) { switch (state) { case expectedUp: return "expectedUp"; case allowedDown: return "allowedDown"; case permanentlyDown: return "permanentlyDown"; case unorchestrated: return "unorchestrated"; case unknown: break; } return "unknown"; }

// Node.ClusterType -> wire string; 'unknown' is rejected rather than serialized.
private static String valueOf(Node.ClusterType type) { return switch (type) { case admin: yield "admin"; case content: yield "content"; case container: yield "container"; case combined: yield "combined"; case unknown: throw new IllegalArgumentException("Unexpected node cluster type '" + type + "'."); }; }

// NodeResources.DiskSpeed -> wire string (exhaustive switch expression).
private static String valueOf(NodeResources.DiskSpeed diskSpeed) { return switch (diskSpeed) { case fast : yield "fast"; case slow : yield "slow"; case any : yield "any"; }; }

// NodeResources.StorageType -> wire string (exhaustive switch expression).
private static String valueOf(NodeResources.StorageType storageType) { return switch (storageType) { case remote : yield "remote"; case local : yield "local"; case any : yield "any"; }; }

// Streams deployment logs from the config server straight to the client; the log stream is
// closed via try-with-resources once transferred. maxPendingBytes bounds buffering to 64 MiB.
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); InputStream logStream = controller.serviceRegistry().configServer().getLogs(deployment, queryParameters); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { try (logStream) { logStream.transferTo(outputStream); } } @Override public long maxPendingBytes() { return 1 << 26; } }; }

// Returns the current support-access state for a deployment.
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant())); }

// Grants support access to a deployment for 7 days on behalf of the calling user.
// (Continues past this block boundary.)
private HttpResponse allowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); Instant now = controller.clock().instant(); SupportAccess allowed =
// (continuation of allowSupportAccess from the previous line)
controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now)); }

// Revokes support access and re-triggers (or queues) the deployment job so the change rolls out.
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region)); Principal principal = requireUserPrincipal(request); SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName()); controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment, "re-triggered to disallow support access, by " + request.getJDiscRequest().getUserPrincipal().getName()); return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant())); }

// Returns proton (search core) metrics for a deployment.
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = requireZone(environment, region); DeploymentId deployment = new DeploymentId(application, zone); List<ProtonMetrics> protonMetrics = controller.serviceRegistry().configServer().getProtonMetrics(deployment); return buildResponseFromProtonMetrics(protonMetrics); }

// Returns scaling events per cluster in the window ["from", "until"] (epoch seconds; defaults:
// epoch start and now). (Continues past this block boundary.)
private HttpResponse scaling(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { var from = Optional.ofNullable(request.getProperty("from")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.EPOCH); var until = Optional.ofNullable(request.getProperty("until")) .map(Long::valueOf) .map(Instant::ofEpochSecond) .orElse(Instant.now(controller.clock())); var application = ApplicationId.from(tenantName, applicationName, instanceName); var zone = requireZone(environment, region); var deployment = new DeploymentId(application, zone); var events = controller.serviceRegistry().resourceDatabase().scalingEvents(from, until, deployment); var slime = new Slime(); var root = slime.setObject(); for (var entry : events.entrySet()) { var serviceRoot = root.setArray(entry.getKey().clusterId().value()); scalingEventsToSlime(entry.getValue(), serviceRoot); } return new SlimeJsonResponse(slime); }

// Wraps a list of ProtonMetrics in a {"metrics": [...]} JSON body; 500 on serialization failure.
private JsonResponse buildResponseFromProtonMetrics(List<ProtonMetrics> protonMetrics) { try { var jsonObject = jsonMapper.createObjectNode(); var jsonArray = jsonMapper.createArrayNode(); for (ProtonMetrics metrics : protonMetrics) { jsonArray.add(metrics.toJson()); } jsonObject.set("metrics", jsonArray); return new JsonResponse(200, jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonObject)); } catch (JsonProcessingException e) { log.log(Level.WARNING, "Unable to build JsonResponse with Proton data: " + e.getMessage(), e); return new JsonResponse(500, ""); } }

// Manually (re-)triggers a job. test/staging types are normalized to this system's cloud;
// body flags: skipTests, reTrigger, skipRevision, skipUpgrade. (Continues past this boundary.)
private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { ZoneRegistry zones = controller.zoneRegistry(); type = switch (type.environment()) { case test -> JobType.systemTest(zones, zones.systemZone().getCloudName()); case staging -> JobType.stagingTest(zones, zones.systemZone().getCloudName()); default -> type; }; Inspector requestObject = toSlime(request.getData()).get(); boolean requireTests = ! requestObject.field("skipTests").asBool(); boolean reTrigger = requestObject.field("reTrigger").asBool(); boolean upgradeRevision = ! requestObject.field("skipRevision").asBool(); boolean upgradePlatform = ! requestObject.field("skipUpgrade").asBool(); String triggered = reTrigger ? controller.applications().deploymentTrigger() .reTrigger(id, type, "re-triggered by " + request.getJDiscRequest().getUserPrincipal().getName()).type().jobName() : controller.applications().deploymentTrigger() .forceTrigger(id, type, "triggered by " + request.getJDiscRequest().getUserPrincipal().getName(), requireTests, upgradeRevision, upgradePlatform) .stream().map(job -> job.type().jobName()).collect(joining(", ")); String suppressedUpgrades = ( ! upgradeRevision || ! upgradePlatform ? ", without " : "") + (upgradeRevision ? "" : "revision") + ( ! upgradeRevision && ! upgradePlatform ? " and " : "") + (upgradePlatform ? "" : "platform") + ( ! upgradeRevision || ! upgradePlatform ? " upgrade" : ""); return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id + suppressedUpgrades); }

// Pauses a job for the maximum allowed pause duration.
private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); }

// Resumes a previously paused job.
private HttpResponse resume(ApplicationId id, JobType type) { controller.applications().deploymentTrigger().resumeJob(id, type); return new MessageResponse(type.jobName() + " for " + id + " resumed"); }

// Serializes an application overview: deployments link, latest version, project id, in-flight and
// outstanding changes, instances, deploy keys, metrics, activity and ownership/issue references.
// (Continues past this block boundary.)
private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("tenant", application.id().tenant().value()); object.setString("application", application.id().application().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/job/", request.getUri()).toString()); DeploymentStatus status = controller.jobController().deploymentStatus(application); application.revisions().last().ifPresent(version ->
// (continuation of toSlime(object, Application, request) from the previous line)
JobControllerApiHandlerHelper.toSlime(object.setObject("latestVersion"), version)); application.projectId().ifPresent(id -> object.setLong("projectId", id)); application.instances().values().stream().findFirst().ifPresent(instance -> { if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); }); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor instancesArray = object.setArray("instances"); for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values() : application.instances().values()) toSlime(instancesArray.addObject(), status, instance, application.deploymentSpec(), request); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); }

// Serializes one instance under an application overview: in-flight/outstanding changes, change
// blockers from the deployment spec, rotation id, and its deployments (recursed or as links).
private void toSlime(Cursor object, DeploymentStatus status, Instance instance, DeploymentSpec deploymentSpec, HttpRequest request) { object.setString("instance", instance.name().value()); if (deploymentSpec.instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( ! instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), status.application()); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), status.application()); Cursor changeBlockers = object.setArray("changeBlockers"); deploymentSpec.instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } addRotationId(object, instance); List<Deployment> deployments = deploymentSpec.instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor deploymentsArray = object.setArray("deployments"); for (Deployment deployment : deployments) { Cursor deploymentObject = deploymentsArray.addObject(); if (deployment.zone().environment() == Environment.prod && ! instance.rotations().isEmpty()) toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/instance/" + instance.name().value() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } }

// Emits the first assigned rotation's id, if any, as "rotationId".
private void addRotationId(Cursor object, Instance instance) { instance.rotations().stream() .map(AssignedRotation::rotationId) .findFirst() .ifPresent(rotation -> object.setString("rotationId", rotation.asString())); }

// Serializes a single instance view (instance-level endpoint): source/commit of the latest
// revision, changes, change blockers, rotations and deployments. (Continues past this boundary.)
private void toSlime(Cursor object, Instance instance, DeploymentStatus status, HttpRequest request) { Application application = status.application(); object.setString("tenant", instance.id().tenant().value()); object.setString("application", instance.id().application().value()); object.setString("instance", instance.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + instance.id().tenant().value() + "/application/" + instance.id().application().value() + "/instance/" + instance.id().instance().value() + "/job/", request.getUri()).toString()); application.revisions().last().ifPresent(version -> { version.sourceUrl().ifPresent(url -> object.setString("sourceUrl", url)); version.commit().ifPresent(commit -> object.setString("commit", commit)); }); application.projectId().ifPresent(id -> object.setLong("projectId", id)); if (application.deploymentSpec().instance(instance.name()).isPresent()) { Collection<JobStatus> jobStatus = status.instanceJobs(instance.name()).values(); if ( !
// (continuation of toSlime(object, instance, status, request) — changes, change blockers,
// major version, rotations, deployments, plus declared-but-missing deployment zones, deploy
// keys, metrics, activity and ownership/issue references)
instance.change().isEmpty()) toSlime(object.setObject("deploying"), instance.change(), application); if ( ! status.outstandingChange(instance.name()).isEmpty()) toSlime(object.setObject("outstandingChange"), status.outstandingChange(instance.name()), application); Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().instance(instance.name()).ifPresent(spec -> spec.changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); })); } application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); addRotationId(object, instance); List<Deployment> deployments = application.deploymentSpec().instance(instance.name()) .map(spec -> sortedDeployments(instance.deployments().values(), spec)) .orElse(List.copyOf(instance.deployments().values())); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); if (deployment.zone().environment() == Environment.prod) { if (instance.rotations().size() == 1) { toSlime(instance.rotationStatus().of(instance.rotations().get(0).rotationId(), deployment), deploymentObject); } if ( ! recurseOverDeployments(request) && ! instance.rotations().isEmpty()) { toSlime(instance.rotations(), instance.rotationStatus(), deployment, deploymentObject); } } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(instance.id(), deployment.zone()), deployment, request); else { deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", instance.id().instance().value()); deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value(), request.getUri()).toString()); } } Stream.concat(status.jobSteps().keySet().stream() .filter(job -> job.application().instance().equals(instance.name())) .filter(job -> job.type().isProduction() && job.type().isDeployment()), controller.jobController().active(instance.id()).stream() .map(run -> run.id().job()) .filter(job -> job.type().environment().isManuallyDeployed())) .map(job -> job.type().zone()) .filter(zone -> ! instance.deployments().containsKey(zone)) .forEach(zone -> { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", zone.environment().value()); deploymentObject.setString("region", zone.region().value()); }); application.deployKeys().stream().findFirst().ifPresent(key -> object.setString("pemDeployKey", KeyUtils.toPem(key))); application.deployKeys().stream().map(KeyUtils::toPem).forEach(object.setArray("pemDeployKeys")::addString); Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); }

// GET for a single deployment; 404-equivalent NotExistsException when the instance is not
// deployed in the requested zone. (Continues past this block boundary.)
private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Instance instance = controller.applications().getInstance(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(instance.id(), requireZone(environment, region));
// (continuation of deployment(...) from the previous line)
Deployment deployment = instance.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(instance + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); }

// Serializes a Change: platform version and/or application revision.
private void toSlime(Cursor object, Change change, Application application) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.revision().ifPresent(revision -> JobControllerApiHandlerHelper.toSlime(object.setObject("revision"), application.revisions().get(revision))); }

// Serializes a single endpoint: cluster, tls, url, scope, routing method and legacy flag.
private void toSlime(Endpoint endpoint, Cursor object) { object.setString("cluster", endpoint.cluster().value()); object.setBool("tls", endpoint.tls()); object.setString("url", endpoint.url().toString()); object.setString("scope", endpointScopeString(endpoint.scope())); object.setString("routingMethod", routingMethodString(endpoint.routingMethod())); object.setBool("legacy", endpoint.legacy()); }

// Full deployment serialization: identity, endpoints (zone-scoped plus declared; legacy and
// direct endpoints filtered out unless includeLegacyEndpoints is set), links to clusters/nodes/
// monitoring, version/revision/build, deploy time and TTL-based expiry, rotations, job status
// ("complete"/"pending"/"running"), quota/cost, archive URI, activity and deployment metrics.
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { response.setString("tenant", deploymentId.applicationId().tenant().value()); response.setString("application", deploymentId.applicationId().application().value()); response.setString("instance", deploymentId.applicationId().instance().value()); response.setString("environment", deploymentId.zoneId().environment().value()); response.setString("region", deploymentId.zoneId().region().value()); var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId())); boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints"); var endpointArray = response.setArray("endpoints"); EndpointList zoneEndpoints = controller.routing().readEndpointsOf(deploymentId) .scope(Endpoint.Scope.zone); if (!legacyEndpoints) { zoneEndpoints = zoneEndpoints.not().legacy().direct(); } for (var endpoint : zoneEndpoints) { toSlime(endpoint, endpointArray.addObject()); } EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application) .targets(deploymentId); if (!legacyEndpoints) { declaredEndpoints = declaredEndpoints.not().legacy().direct(); } for (var endpoint : declaredEndpoints) { toSlime(endpoint, endpointArray.addObject()); } response.setString("clusters", withPath(toPath(deploymentId) + "/clusters", request.getUri()).toString()); response.setString("nodes", withPathAndQuery("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/", "recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", application.revisions().get(deployment.revision()).stringId()); response.setLong("build", deployment.revision().number()); Instant lastDeploymentStart = controller.jobController().lastDeploymentStart(deploymentId.applicationId(), deployment); response.setLong("deployTimeEpochMs", lastDeploymentStart.toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", lastDeploymentStart.plus(deploymentTimeToLive).toEpochMilli())); application.projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); var instance = application.instances().get(deploymentId.applicationId().instance()); if (instance != null) { if (!instance.rotations().isEmpty() && deployment.zone().environment() == Environment.prod) toSlime(instance.rotations(), instance.rotationStatus(), deployment, response); if (!deployment.zone().environment().isManuallyDeployed()) { DeploymentStatus status = controller.jobController().deploymentStatus(application); JobId jobId = new JobId(instance.id(), JobType.deploymentTo(deployment.zone())); Optional.ofNullable(status.jobSteps().get(jobId)) .ifPresent(stepStatus -> { JobControllerApiHandlerHelper.toSlime(response.setObject("applicationVersion"), application.revisions().get(deployment.revision())); if ( ! status.jobsToRun().containsKey(stepStatus.job().get())) response.setString("status", "complete"); else if (stepStatus.readyAt(instance.change()).map(controller.clock().instant()::isBefore).orElse(true)) response.setString("status", "pending"); else response.setString("status", "running"); }); } else { var deploymentRun = controller.jobController().last(deploymentId.applicationId(), JobType.deploymentTo(deploymentId.zoneId())); deploymentRun.ifPresent(run -> { response.setString("status", run.hasEnded() ? "complete" : "running"); }); } } response.setDouble("quota", deployment.quota().rate()); deployment.cost().ifPresent(cost -> response.setDouble("cost", cost)); controller.archiveBucketDb().archiveUriFor(deploymentId.zoneId(), deploymentId.applicationId().tenant(), false) .ifPresent(archiveUri -> response.setString("archiveUri", archiveUri.toString())); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); }

// Serializes a rotation state as a "bcpStatus" object.
private void toSlime(RotationState state, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", rotationStateString(state)); }

// Serializes per-rotation endpoint status (endpoint/rotation/cluster ids, state, last update).
private void toSlime(List<AssignedRotation> rotations, RotationStatus status, Deployment deployment, Cursor object) { var array = object.setArray("endpointStatus"); for (var rotation : rotations) { var statusObject = array.addObject(); var targets = status.of(rotation.rotationId()); statusObject.setString("endpointId", rotation.endpointId().id()); statusObject.setString("rotationId", rotation.rotationId().asString()); statusObject.setString("clusterId", rotation.clusterId().value()); statusObject.setString("status", rotationStateString(status.of(rotation.rotationId(), deployment))); statusObject.setLong("lastUpdated", targets.lastUpdated().toEpochMilli()); } }

// Resolves the monitoring-system URI for a deployment via the zone registry.
private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); }

// Sets the global rotation override (in/out of service) for a deployment.
// (Method runs past the end of this chunk — remainder not visible here; kept verbatim.)
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = requireZone(environment, region); Deployment deployment = instance.deployments().get(zone); if (deployment == null) { throw new NotExistsException(instance + " has no deployment in " + zone); } DeploymentId deploymentId = new DeploymentId(instance.id(), zone); RoutingStatus.Agent agent = isOperator(request) ?
RoutingStatus.Agent.operator : RoutingStatus.Agent.tenant;
    RoutingStatus.Value status = inService ? RoutingStatus.Value.in : RoutingStatus.Value.out;
    controller.routing().of(deploymentId).setRoutingStatus(status, agent);
    return new MessageResponse(Text.format("Successfully set %s in %s %s service", instance.id().toShortString(), zone, inService ? "in" : "out of"));
}

/** Returns the current global-rotation override status for the deployment's primary rotation endpoint. */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    Optional<Endpoint> primaryEndpoint = controller.routing().readDeclaredEndpointsOf(deploymentId.applicationId())
                                                   .requiresRotation()
                                                   .primary();
    if (primaryEndpoint.isPresent()) {
        DeploymentRoutingContext context = controller.routing().of(deploymentId);
        RoutingStatus status = context.routingStatus();
        array.addString(primaryEndpoint.get().upstreamName(deploymentId));
        Cursor statusObject = array.addObject();
        statusObject.setString("status", status.value().name());
        statusObject.setString("reason", "");
        statusObject.setString("agent", status.agent().name());
        statusObject.setLong("timestamp", status.changedAt().getEpochSecond());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns rotation status for the given (optional) endpoint of a deployment. */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region, Optional<String> endpointId) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Instance instance = controller.applications().requireInstance(applicationId);
    ZoneId zone = requireZone(environment, region);
    RotationId rotation = findRotationId(instance, endpointId);
    Deployment deployment = instance.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(instance + " has no deployment in " + zone);
    }
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    toSlime(instance.rotationStatus().of(rotation, deployment), response);
    return new SlimeJsonResponse(slime);
}

/** Returns the currently deploying change (platform and/or application), and whether it is pinned. */
private HttpResponse deploying(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    Instance instance = controller.applications().requireInstance(ApplicationId.from(tenantName, applicationName, instanceName));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if ( ! instance.change().isEmpty()) {
        instance.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
        instance.change().revision().ifPresent(revision -> root.setString("application", revision.toString()));
        root.setBool("pinned", instance.change().isPinned());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns whether orchestration of the given deployment is suspended. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 requireZone(environment, region));
    boolean suspended = controller.applications().isSuspended(deploymentId);
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setBool("suspended", suspended);
    return new SlimeJsonResponse(slime);
}

/** Proxies a /status page request to a service node in the deployment. */
private HttpResponse status(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodePage(deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/status").append(restPath), Query.empty().add(request.getJDiscRequest().parameters()));
}

/** Lists the service nodes of the given deployment. */
private HttpResponse orchestrator(String tenantName, String applicationName, String instanceName, String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    return controller.serviceRegistry().configServer().getServiceNodes(deploymentId);
}

/** Proxies a /state/v1 page request to a service node, forwarding the original request URL. */
private HttpResponse stateV1(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String host, HttpURL.Path rest, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    Query query = Query.empty().add(request.getJDiscRequest().parameters());
    query = query.set("forwarded-url", HttpURL.from(request.getUri()).withQuery(Query.empty()).asURI().toString());
    return controller.serviceRegistry().configServer().getServiceNodePage(
            deploymentId, serviceName, DomainName.of(host), HttpURL.Path.parse("/state/v1").append(rest), query);
}

/** Fetches application-package content for the deployment at the given path. */
private HttpResponse content(String tenantName, String applicationName, String instanceName, String environment, String region, HttpURL.Path restPath, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    return controller.serviceRegistry().configServer().getApplicationPackageContent(deploymentId, restPath, request.getUri());
}

/** Updates tenant information; the tenant must already exist. */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    getTenantOrThrow(tenantName);
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().update(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}

/** Creates a tenant; in public systems also seeds tenant contact info from the requesting user. */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    TenantName tenant = TenantName.from(tenantName);
    Inspector requestObject = toSlime(request.getData()).get();
    controller.tenants().create(accessControlRequests.specification(tenant, requestObject),
                                accessControlRequests.credentials(tenant, requestObject, request.getJDiscRequest()));
    if (controller.system().isPublic()) {
        User user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
        TenantInfo info = controller.tenants().require(tenant, CloudTenant.class)
                                    .info()
                                    .withContact(TenantContact.from(user.name(), user.email()));
        controller.tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withInfo(info);
            controller.tenants().store(lockedTenant);
        });
    }
    return tenant(controller.tenants().require(TenantName.from(tenantName)), request);
}

/** Creates an application under the tenant and returns its serialized form. */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Inspector requestObject = toSlime(request.getData()).get();
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), requestObject, request.getJDiscRequest());
    Application application = controller.applications().createApplication(id, credentials);
    Slime slime = new Slime();
    toSlime(id, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}

/** Creates an instance, creating the application first if it does not exist. */
private HttpResponse createInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(tenantName, applicationName);
    if (controller.applications().getApplication(applicationId).isEmpty())
        createApplication(tenantName, applicationName, request);
    controller.applications().createInstance(applicationId.instance(instanceName));
    Slime slime = new Slime();
    toSlime(applicationId.instance(instanceName), slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}

/** Trigger deployment of the given Vespa version if a valid
one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
    String versionString = readToString(request.getData());
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Version version = Version.fromString(versionString);
        VersionStatus versionStatus = controller.readVersionStatus();
        // An empty version in the request means "the current system version".
        if (version.equals(Version.emptyVersion))
            version = controller.systemVersion(versionStatus);
        if (!versionStatus.isActive(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + versionStatus.versions()
                                                                                  .stream()
                                                                                  .map(VespaVersion::versionNumber)
                                                                                  .map(Version::toString)
                                                                                  .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin)
            change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    Inspector buildField = toSlime(request.getData()).get().field("build");
    long build = buildField.valid() ? buildField.asLong() : -1; // -1 means "latest known build"
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        RevisionId revision = build == -1 ? application.get().revisions().last().get().id()
                                          : getRevision(application.get(), build);
        Change change = Change.of(revision);
        controller.applications().deploymentTrigger().forceChange(id, change, isOperator(request));
        response.append("Triggered ").append(change).append(" for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Resolves the production revision with the given build number, requiring its package to still be stored. */
private RevisionId getRevision(Application application, long build) {
    return application.revisions().withPackage().stream()
                      .map(ApplicationVersion::id)
                      .filter(version -> version.number() == build)
                      .findFirst()
                      .filter(version -> controller.applications().applicationStore().hasBuild(application.id().tenant(), application.id().application(), build))
                      .orElseThrow(() -> new IllegalArgumentException("Build number '" + build + "' was not found"));
}

/** Marks a build as non-deployable and cancels any instance change targeting that revision. */
private HttpResponse cancelBuild(String tenantName, String applicationName, String build){
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    RevisionId revision = RevisionId.forProduction(Long.parseLong(build));
    controller.applications().lockApplicationOrThrow(id, application -> {
        controller.applications().store(application.withRevisions(revisions -> revisions.with(revisions.get(revision).skipped())));
        for (Instance instance : application.get().instances().values())
            if (instance.change().revision().equals(Optional.of(revision)))
                controller.applications().deploymentTrigger().cancelChange(instance.id(), ChangesToCancel.APPLICATION);
    });
    return new MessageResponse("Marked build '" + build + "' as non-deployable");
}

/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String instanceName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    StringBuilder response = new StringBuilder();
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        Change change = application.get().require(id.instance()).change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for ").append(id).append(" at this time");
            return;
        }
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '").append(change).append("' to '").append(controller.applications().requireInstance(id).change()).append("' for ").append(id);
    });
    return new MessageResponse(response.toString());
}

/** Schedule reindexing of an application, or a subset of clusters, possibly on a subset of documents. */
private HttpResponse reindex(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    // "clusterId" and "documentType" are comma-separated request properties; blank entries are ignored.
    List<String> clusterNames = Optional.ofNullable(request.getProperty("clusterId")).stream()
                                        .flatMap(clusters -> Stream.of(clusters.split(",")))
                                        .filter(cluster -> ! cluster.isBlank())
                                        .toList();
    List<String> documentTypes = Optional.ofNullable(request.getProperty("documentType")).stream()
                                         .flatMap(types -> Stream.of(types.split(",")))
                                         .filter(type -> ! type.isBlank())
                                         .toList();
    Double speed = request.hasProperty("speed") ? Double.parseDouble(request.getProperty("speed")) : null;
    boolean indexedOnly = request.getBooleanProperty("indexedOnly");
    controller.applications().reindex(id, zone, clusterNames, documentTypes, indexedOnly, speed);
    return new MessageResponse("Requested reindexing of " + id + " in " + zone +
                               (clusterNames.isEmpty() ? "" : ", on clusters " + String.join(", ", clusterNames)) +
                               (documentTypes.isEmpty() ? "" : ", for types " + String.join(", ", documentTypes)) +
                               (indexedOnly ? ", for indexed types" : "") +
                               (speed != null ? ", with speed " + speed : ""));
}

/** Gets reindexing status of an application in a zone.
*/
private HttpResponse getReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    ApplicationReindexing reindexing = controller.applications().applicationReindexing(id, zone);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    root.setBool("enabled", reindexing.enabled());
    Cursor clustersArray = root.setArray("clusters");
    // Clusters, and their pending/ready types, are emitted in key-sorted order for stable output.
    reindexing.clusters().entrySet().stream().sorted(comparingByKey())
              .forEach(cluster -> {
                  Cursor clusterObject = clustersArray.addObject();
                  clusterObject.setString("name", cluster.getKey());
                  Cursor pendingArray = clusterObject.setArray("pending");
                  cluster.getValue().pending().entrySet().stream().sorted(comparingByKey())
                         .forEach(pending -> {
                             Cursor pendingObject = pendingArray.addObject();
                             pendingObject.setString("type", pending.getKey());
                             pendingObject.setLong("requiredGeneration", pending.getValue());
                         });
                  Cursor readyArray = clusterObject.setArray("ready");
                  cluster.getValue().ready().entrySet().stream().sorted(comparingByKey())
                         .forEach(ready -> {
                             Cursor readyObject = readyArray.addObject();
                             readyObject.setString("type", ready.getKey());
                             setStatus(readyObject, ready.getValue());
                         });
              });
    return new SlimeJsonResponse(slime);
}

/** Serializes the optional fields of a reindexing status onto the given cursor. */
void setStatus(Cursor statusObject, ApplicationReindexing.Status status) {
    status.readyAt().ifPresent(readyAt -> statusObject.setLong("readyAtMillis", readyAt.toEpochMilli()));
    status.startedAt().ifPresent(startedAt -> statusObject.setLong("startedAtMillis", startedAt.toEpochMilli()));
    status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedAtMillis", endedAt.toEpochMilli()));
    status.state().map(ApplicationApiHandler::toString).ifPresent(state -> statusObject.setString("state", state));
    status.message().ifPresent(message -> statusObject.setString("message", message));
    status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
    status.speed().ifPresent(speed -> statusObject.setDouble("speed", speed));
}

/** Maps a reindexing state to its wire-format string. */
private static String toString(ApplicationReindexing.State state) {
    return switch (state) {
        case PENDING: yield "pending";
        case RUNNING: yield "running";
        case FAILED: yield "failed";
        case SUCCESSFUL: yield "successful";
    };
}

/** Enables reindexing of an application in a zone. */
private HttpResponse enableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().enableReindexing(id, zone);
    return new MessageResponse("Enabled reindexing of " + id + " in " + zone);
}

/** Disables reindexing of an application in a zone. */
private HttpResponse disableReindexing(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);
    controller.applications().disableReindexing(id, zone);
    return new MessageResponse("Disabled reindexing of " + id + " in " + zone);
}

/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    // Optional request properties narrow the restart to a specific host, cluster type, or cluster id.
    RestartFilter restartFilter = new RestartFilter()
            .withHostName(Optional.ofNullable(request.getProperty("hostname")).map(HostName::of))
            .withClusterType(Optional.ofNullable(request.getProperty("clusterType")).map(ClusterSpec.Type::from))
            .withClusterId(Optional.ofNullable(request.getProperty("clusterId")).map(ClusterSpec.Id::from));
    controller.applications().restart(deploymentId, restartFilter);
    return new MessageResponse("Requested restart of " + deploymentId);
}

/** Set suspension status of the given deployment. */
private HttpResponse suspend(String tenantName, String applicationName, String instanceName, String environment, String region, boolean suspend) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
    controller.applications().setSuspension(deploymentId, suspend);
    return new MessageResponse((suspend ? "Suspended" : "Resumed") + " orchestration of " + deploymentId);
}

/** Deploys an application package directly; non-operators may only target manually deployed environments. */
private HttpResponse jobDeploy(ApplicationId id, JobType type, HttpRequest request) {
    if ( ! type.environment().isManuallyDeployed() && ! isOperator(request))
        throw new IllegalArgumentException("Direct deployments are only allowed to manually deployed environments.");

    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( ! dataParts.containsKey("applicationZip"))
        throw new IllegalArgumentException("Missing required form part 'applicationZip'");

    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(id.tenant(),
                                                                     Optional.of(id.instance()),
                                                                     Optional.of(type.zone()),
                                                                     applicationPackage,
                                                                     Optional.of(requireUserPrincipal(request)));
    // Optional "deployOptions" JSON part may carry a Vespa version and a dry-run flag.
    Optional<Version> version = Optional.ofNullable(dataParts.get("deployOptions"))
                                        .map(json -> SlimeUtils.jsonToSlime(json).get())
                                        .flatMap(options -> optional("vespaVersion", options))
                                        .map(Version::fromString);

    ensureApplicationExists(TenantAndApplicationId.from(id), request);

    boolean dryRun = Optional.ofNullable(dataParts.get("deployOptions"))
                             .map(json -> SlimeUtils.jsonToSlime(json).get())
                             .flatMap(options -> optional("dryRun", options))
                             .map(Boolean::valueOf)
                             .orElse(false);

    controller.jobController().deploy(id, type, version, applicationPackage, dryRun, isOperator(request));
    RunId runId = controller.jobController().last(id, type).get().id();
    Slime slime = new Slime();
    Cursor rootObject = slime.setObject();
    rootObject.setString("message", "Deployment started in " + runId + ". This may take about 15 minutes the first time.");
    rootObject.setLong("run", runId.number());
    return new SlimeJsonResponse(slime);
}

/** Deploys a system application package to the given zone; regular applications are rejected here. */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = requireZone(environment, region);

    Map<String, byte[]> dataParts = parseDataParts(request);
    if ( !
dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");

    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    // Only system applications with an application package may be deployed through this path.
    Optional<SystemApplication> systemApplication = SystemApplication.matching(applicationId);
    if (systemApplication.isEmpty() || !systemApplication.get().hasApplicationPackage()) {
        return ErrorResponse.badRequest("Deployment of " + applicationId + " is not supported through this API");
    }

    // System applications are always deployed at the system version; requesting a specific one is an error.
    String vespaVersion = deployOptions.field("vespaVersion").asString();
    if (!vespaVersion.isEmpty() && !vespaVersion.equals("null")) {
        return ErrorResponse.badRequest("Specifying version for " + applicationId + " is not permitted");
    }

    VersionStatus versionStatus = controller.readVersionStatus();
    if (versionStatus.isUpgrading()) {
        throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
    }
    Optional<VespaVersion> systemVersion = versionStatus.systemVersion();
    if (systemVersion.isEmpty()) {
        throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
    }
    ActivateResult result = controller.applications()
                                      .deploySystemApplicationPackage(systemApplication.get(), zone, systemVersion.get().versionNumber());
    return new SlimeJsonResponse(toSlime(result));
}

/** Deletes a tenant; "forget" (permanent removal) is restricted to operators. */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    boolean forget = request.getBooleanProperty("forget");
    if (forget && !isOperator(request))
        return ErrorResponse.forbidden("Only operators can forget a tenant");
    controller.tenants().delete(TenantName.from(tenantName),
                                Optional.of(accessControlRequests.credentials(TenantName.from(tenantName),
                                                                              toSlime(request.getData()).get(),
                                                                              request.getJDiscRequest())),
                                forget);
    return new MessageResponse("Deleted tenant " + tenantName);
}

/** Deletes an application, authorized by credentials from the request. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
    controller.applications().deleteApplication(id, credentials);
    return new MessageResponse("Deleted application " + id);
}

/** Deletes an instance, and the application as well if this was its last instance. */
private HttpResponse deleteInstance(String tenantName, String applicationName, String instanceName, HttpRequest request) {
    TenantAndApplicationId id = TenantAndApplicationId.from(tenantName, applicationName);
    controller.applications().deleteInstance(id.instance(instanceName));
    if (controller.applications().requireApplication(id).instances().isEmpty()) {
        Credentials credentials = accessControlRequests.credentials(id.tenant(), toSlime(request.getData()).get(), request.getJDiscRequest());
        controller.applications().deleteApplication(id, credentials);
    }
    return new MessageResponse("Deleted instance " + id.instance(instanceName).toFullString());
}

/** Deactivates a deployment and aborts any still-running deployment job for it. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
    DeploymentId id = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                       requireZone(environment, region));
    controller.applications().deactivate(id.applicationId(), id.zoneId());
    controller.jobController().last(id.applicationId(), JobType.deploymentTo(id.zoneId()))
              .filter(run -> ! run.hasEnded())
              .ifPresent(last -> controller.jobController().abort(last.id(), "deployment deactivated by " + request.getJDiscRequest().getUserPrincipal().getName()));
    return new MessageResponse("Deactivated " + id);
}

/** Returns test config for indicated job, with production deployments of the default instance if the given is not in deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
    Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
    // Fall back to the default instance when the given instance is not declared in the deployment spec.
    ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
                                   ? id : TenantAndApplicationId.from(id).defaultInstance();
    HashSet<DeploymentId> deployments = controller.applications()
                                                  .getInstance(prodInstanceId).stream()
                                                  .flatMap(instance -> instance.productionDeployments().keySet().stream())
                                                  .map(zone -> new DeploymentId(prodInstanceId, zone))
                                                  .collect(Collectors.toCollection(HashSet::new));
    ApplicationId toTest = type.isProduction() ? prodInstanceId : id;
    if ( ! type.isProduction())
        deployments.add(new DeploymentId(toTest, type.zone()));
    Deployment deployment = application.require(toTest.instance()).deployments().get(type.zone());
    if (deployment == null)
        throw new NotExistsException(toTest + " is not deployed in " + type.zone());
    return new SlimeJsonResponse(testConfigSerializer.configSlime(id,
                                                                  type,
                                                                  false,
                                                                  deployment.version(),
                                                                  deployment.revision(),
                                                                  deployment.at(),
                                                                  controller.routing().readTestRunnerEndpointsOf(deployments),
                                                                  controller.applications().reachableContentClustersByZone(deployments)));
}

/** Requests a service dump on a node, optionally waiting for the result. */
private HttpResponse requestServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);

    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname).orElse(null);
    if (report != null) {
        Cursor cursor = report.get();
        // A dump with neither "failedAt" nor "completedAt" set is still in progress; require "force" to replace it.
        boolean force = request.getBooleanProperty("force");
        if (!force && cursor.field("failedAt").asLong() == 0 && cursor.field("completedAt").asLong() == 0) {
            throw new IllegalArgumentException("Service dump already in progress for " + cursor.field("configId").asString());
        }
    }

    Slime requestPayload;
    try {
        requestPayload = SlimeUtils.jsonToSlimeOrThrow(request.getData().readAllBytes());
    } catch (Exception e) {
        throw new IllegalArgumentException("Missing or invalid JSON in request content", e);
    }
    Cursor requestPayloadCursor = requestPayload.get();
    String configId = requestPayloadCursor.field("configId").asString();
    long expiresAt = requestPayloadCursor.field("expiresAt").asLong();
    if (configId.isEmpty()) {
        throw new IllegalArgumentException("Missing configId");
    }
    Cursor artifactsCursor = requestPayloadCursor.field("artifacts");
    int artifactEntries = artifactsCursor.entries();
    if (artifactEntries == 0) {
        throw new IllegalArgumentException("Missing or empty 'artifacts'");
    }

    // Build the dump request and store it as the node's "serviceDump" report.
    Slime dumpRequest = new Slime();
    Cursor dumpRequestCursor = dumpRequest.setObject();
    dumpRequestCursor.setLong("createdMillis", controller.clock().millis());
    dumpRequestCursor.setString("configId", configId);
    Cursor dumpRequestArtifactsCursor = dumpRequestCursor.setArray("artifacts");
    for (int i = 0; i < artifactEntries; i++) {
        dumpRequestArtifactsCursor.addString(artifactsCursor.entry(i).asString());
    }
    if (expiresAt > 0) {
        dumpRequestCursor.setLong("expiresAt", expiresAt);
    }
    Cursor dumpOptionsCursor = requestPayloadCursor.field("dumpOptions");
    if (dumpOptionsCursor.children() > 0) {
        SlimeUtils.copyObject(dumpOptionsCursor, dumpRequestCursor.setObject("dumpOptions"));
    }
    var reportsUpdate = Map.of("serviceDump", new String(uncheck(() -> SlimeUtils.toJsonBytes(dumpRequest))));
    nodeRepository.updateReports(zone, hostname, reportsUpdate);
    boolean wait = request.getBooleanProperty("wait");
    if (!wait) return new MessageResponse("Request created");
    return waitForServiceDumpResult(nodeRepository, zone, tenant, application, instance, hostname);
}

/** Returns the current service dump report for a node, or throws NotExistsException if none exists. */
private HttpResponse getServiceDump(String tenant, String application, String instance, String environment, String region, String hostname, HttpRequest request) {
    NodeRepository nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    ZoneId zone = requireZone(environment, region);
    Slime report = getReport(nodeRepository, zone, tenant, application, instance, hostname)
            .orElseThrow(() -> new NotExistsException("No service dump for node " + hostname));
    return new SlimeJsonResponse(report);
}

/** Polls the node's service dump report until it completes or fails, then returns it. */
private HttpResponse waitForServiceDumpResult(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) {
    int pollInterval = 2; // seconds between polls
    Slime report;
    while (true) {
        report = getReport(nodeRepository, zone, tenant, application, instance, hostname).get();
        Cursor cursor = report.get();
        if (cursor.field("completedAt").asLong() > 0 || cursor.field("failedAt").asLong() > 0) {
            break;
        }
        final Slime copyForLambda = report;
        log.fine(() -> uncheck(() -> new String(SlimeUtils.toJsonBytes(copyForLambda))));
        log.fine("Sleeping " + pollInterval + " seconds before checking report status again");
        controller.sleeper().sleep(Duration.ofSeconds(pollInterval));
    }
    return new SlimeJsonResponse(report);
}

/** Reads the "serviceDump" report for a node, verifying that the node is owned by the given application. */
private Optional<Slime> getReport(NodeRepository nodeRepository, ZoneId zone, String tenant, String application, String instance, String hostname) {
    Node node;
    try {
        node = nodeRepository.getNode(zone, hostname);
    } catch (IllegalArgumentException e) {
        throw new NotExistsException(hostname);
    }
    ApplicationId app = ApplicationId.from(tenant, application, instance);
    ApplicationId owner = node.owner().orElseThrow(() -> new IllegalArgumentException("Node has no owner"));
    if (!app.equals(owner)) {
        throw new IllegalArgumentException("Node is not owned by " + app.toFullString());
    }
    String json = node.reports().get("serviceDump");
    if (json == null) return Optional.empty();
    return Optional.of(SlimeUtils.jsonToSlimeOrThrow(json));
}

// NOTE(review): this method continues past the end of this chunk.
private static SourceRevision toSourceRevision(Inspector object) {
    if (!object.field("repository").valid() ||
        !object.field("branch").valid() ||
        !object.field("commit").valid()) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().get(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, List<Application> applications, HttpRequest request) { object.setString("tenant", tenant.name().value()); object.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); break; case cloud: { CloudTenant cloudTenant = (CloudTenant) tenant; cloudTenant.creator().ifPresent(creator -> object.setString("creator", creator.getName())); Cursor pemDeveloperKeysArray = object.setArray("pemDeveloperKeys"); cloudTenant.developerKeys().forEach((key, user) -> { Cursor keyObject = pemDeveloperKeysArray.addObject(); keyObject.setString("key", KeyUtils.toPem(key)); keyObject.setString("user", user.getName()); }); toSlime(object, cloudTenant.tenantSecretStores()); toSlime(object.setObject("integrations").setObject("aws"), controller.serviceRegistry().roleService().getTenantRole(tenant.name()), cloudTenant.tenantSecretStores()); try { var usedQuota = applications.stream() .map(Application::quotaUsage) .reduce(QuotaUsage.none, QuotaUsage::add); 
toSlime(object.setObject("quota"), usedQuota); } catch (Exception e) { log.warning(String.format("Failed to get quota for tenant %s: %s", tenant.name(), Exceptions.toMessageString(e))); } cloudTenant.archiveAccess().awsRole().ifPresent(role -> object.setString("archiveAccessRole", role)); toSlime(cloudTenant.archiveAccess(), object.setObject("archiveAccess")); break; } case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } Cursor applicationArray = object.setArray("applications"); for (Application application : applications) { DeploymentStatus status = null; Collection<Instance> instances = showOnlyProductionInstances(request) ? application.productionInstances().values() : application.instances().values(); if (instances.isEmpty() && !showOnlyActiveInstances(request)) toSlime(application.id(), applicationArray.addObject(), request); for (Instance instance : instances) { if (showOnlyActiveInstances(request) && instance.deployments().isEmpty()) continue; if (recurseOverApplications(request)) { if (status == null) status = controller.jobController().deploymentStatus(application); toSlime(applicationArray.addObject(), instance, status, request); } else { toSlime(instance.id(), applicationArray.addObject(), request); } } } tenantMetaDataToSlime(tenant, applications, object.setObject("metaData")); } private void toSlime(ArchiveAccess archiveAccess, Cursor object) { archiveAccess.awsRole().ifPresent(role -> object.setString("awsRole", role)); archiveAccess.gcpMember().ifPresent(member -> object.setString("gcpMember", member)); } private void toSlime(Cursor object, QuotaUsage usage) { object.setDouble("budgetUsed", usage.rate()); } private void toSlime(ClusterResources resources, Cursor object) { object.setLong("nodes", resources.nodes()); object.setLong("groups", resources.groups()); toSlime(resources.nodeResources(), object.setObject("nodeResources")); double cost = ResourceMeterMaintainer.cost(resources, 
controller.serviceRegistry().zoneRegistry().system()); object.setDouble("cost", cost); } private void utilizationToSlime(Cluster.Utilization utilization, Cursor utilizationObject) { utilizationObject.setDouble("cpu", utilization.cpu()); utilizationObject.setDouble("idealCpu", utilization.idealCpu()); utilizationObject.setDouble("currentCpu", utilization.currentCpu()); utilizationObject.setDouble("peakCpu", utilization.peakCpu()); utilizationObject.setDouble("memory", utilization.memory()); utilizationObject.setDouble("idealMemory", utilization.idealMemory()); utilizationObject.setDouble("currentMemory", utilization.currentMemory()); utilizationObject.setDouble("peakMemory", utilization.peakMemory()); utilizationObject.setDouble("disk", utilization.disk()); utilizationObject.setDouble("idealDisk", utilization.idealDisk()); utilizationObject.setDouble("currentDisk", utilization.currentDisk()); utilizationObject.setDouble("peakDisk", utilization.peakDisk()); } private void scalingEventsToSlime(List<Cluster.ScalingEvent> scalingEvents, Cursor scalingEventsArray) { for (Cluster.ScalingEvent scalingEvent : scalingEvents) { Cursor scalingEventObject = scalingEventsArray.addObject(); toSlime(scalingEvent.from(), scalingEventObject.setObject("from")); toSlime(scalingEvent.to(), scalingEventObject.setObject("to")); scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli()); scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli())); } } private void toSlime(NodeResources resources, Cursor object) { object.setDouble("vcpu", resources.vcpu()); object.setDouble("memoryGb", resources.memoryGb()); object.setDouble("diskGb", resources.diskGb()); object.setDouble("bandwidthGbps", resources.bandwidthGbps()); object.setString("diskSpeed", valueOf(resources.diskSpeed())); object.setString("storageType", valueOf(resources.storageType())); } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor 
object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tenantType(tenant)); switch (tenant.type()) { case athenz: AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); break; case cloud: break; case deleted: break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } private void tenantMetaDataToSlime(Tenant tenant, List<Application> applications, Cursor object) { Optional<Instant> lastDev = applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> instance.deployments().values().stream() .filter(deployment -> deployment.zone().environment() == Environment.dev) .map(deployment -> controller.jobController().lastDeploymentStart(instance.id(), deployment))) .max(Comparator.naturalOrder()) .or(() -> applications.stream() .flatMap(application -> application.instances().values().stream()) .flatMap(instance -> JobType.allIn(controller.zoneRegistry()).stream() .filter(job -> job.environment() == Environment.dev) .flatMap(jobType -> controller.jobController().last(instance.id(), jobType).stream())) .map(Run::start) .max(Comparator.naturalOrder())); Optional<Instant> lastSubmission = applications.stream() .flatMap(app -> app.revisions().last().flatMap(ApplicationVersion::buildTime).stream()) .max(Comparator.naturalOrder()); object.setLong("createdAtMillis", tenant.createdAt().toEpochMilli()); if (tenant.type() == Tenant.Type.deleted) object.setLong("deletedAtMillis", ((DeletedTenant) tenant).deletedAt().toEpochMilli()); lastDev.ifPresent(instant -> object.setLong("lastDeploymentToDevMillis", instant.toEpochMilli())); lastSubmission.ifPresent(instant -> 
object.setLong("lastSubmissionToProdMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) .ifPresent(instant -> object.setLong("lastLoginByUserMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.developer) .ifPresent(instant -> object.setLong("lastLoginByDeveloperMillis", instant.toEpochMilli())); tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.administrator) .ifPresent(instant -> object.setLong("lastLoginByAdministratorMillis", instant.toEpochMilli())); } /** Returns a copy of the given URI with the host and port from the given URI, the path set to the given path and the query set to given query*/ private URI withPathAndQuery(String newPath, String newQuery, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, newQuery, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { return withPathAndQuery(newPath, null, uri); } private String toPath(DeploymentId id) { return path("/application", "v4", "tenant", id.applicationId().tenant(), "application", id.applicationId().application(), "instance", id.applicationId().instance(), "environment", id.zoneId().environment(), "region", id.zoneId().region()); } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private static Principal 
requireUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new IllegalArgumentException("Expected a user principal"); return principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... elements) { return Joiner.on("/").join(elements); } private void toSlime(TenantAndApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value(), request.getUri()).toString()); } private void toSlime(ApplicationId id, Cursor object, HttpRequest request) { object.setString("tenant", id.tenant().value()); object.setString("application", id.application().value()); object.setString("instance", id.instance().value()); object.setString("url", withPath("/application/v4" + "/tenant/" + id.tenant().value() + "/application/" + id.application().value() + "/instance/" + id.instance().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } 
Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private void toSlime(Cursor object, List<TenantSecretStore> tenantSecretStores) { Cursor secretStore = object.setArray("secretStores"); 
tenantSecretStores.forEach(store -> { toSlime(secretStore.addObject(), store); }); } private void toSlime(Cursor object, TenantRoles tenantRoles, List<TenantSecretStore> tenantSecretStores) { object.setString("tenantRole", tenantRoles.containerRole()); var stores = object.setArray("accounts"); tenantSecretStores.forEach(secretStore -> { toSlime(stores.addObject(), secretStore); }); } private void toSlime(Cursor object, TenantSecretStore secretStore) { object.setString("name", secretStore.getName()); object.setString("awsId", secretStore.getAwsId()); object.setString("role", secretStore.getRole()); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! scanner.hasNext()) return null; return scanner.next(); } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static boolean showOnlyProductionInstances(HttpRequest request) { return "true".equals(request.getProperty("production")); } private static boolean showOnlyActiveInstances(HttpRequest request) { return "true".equals(request.getProperty("activeInstances")); } private static boolean includeDeleted(HttpRequest request) { return "true".equals(request.getProperty("includeDeleted")); } private static String tenantType(Tenant tenant) { return switch (tenant.type()) { case athenz: yield "ATHENS"; case cloud: yield "CLOUD"; case deleted: yield "DELETED"; }; } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } 
private JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype"), controller.zoneRegistry()); } private RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = parseDataParts(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); long projectId = submitOptions.field("projectId").asLong(); projectId = projectId == 0 ? 1 : projectId; Optional<String> repository = optional("repository", submitOptions); Optional<String> branch = optional("branch", submitOptions); Optional<String> commit = optional("commit", submitOptions); Optional<SourceRevision> sourceRevision = repository.isPresent() && branch.isPresent() && commit.isPresent() ? Optional.of(new SourceRevision(repository.get(), branch.get(), commit.get())) : Optional.empty(); Optional<String> sourceUrl = optional("sourceUrl", submitOptions); Optional<String> authorEmail = optional("authorEmail", submitOptions); Optional<String> description = optional("description", submitOptions); int risk = (int) submitOptions.field("risk").asLong(); sourceUrl.map(URI::create).ifPresent(url -> { if (url.getHost() == null || url.getScheme() == null) throw new IllegalArgumentException("Source URL must include scheme and host"); }); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP), true); byte[] testPackage = dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP); Submission submission = new Submission(applicationPackage, testPackage, sourceUrl, sourceRevision, authorEmail, description, risk); controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant), Optional.empty(), Optional.empty(), applicationPackage, 
Optional.of(requireUserPrincipal(request))); TenantAndApplicationId id = TenantAndApplicationId.from(tenant, application); ensureApplicationExists(id, request); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), id, submission, projectId); } private HttpResponse removeAllProdDeployments(String tenant, String application) { JobControllerApiHandlerHelper.submitResponse(controller.jobController(), TenantAndApplicationId.from(tenant, application), new Submission(ApplicationPackage.deploymentRemoval(), new byte[0], Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 0), 0); return new MessageResponse("All deployments removed"); } private ZoneId requireZone(String environment, String region) { return requireZone(ZoneId.from(environment, region)); } private ZoneId requireZone(ZoneId zone) { if (zone.environment() == Environment.prod && zone.region().value().equals("controller")) { return zone; } if (!controller.zoneRegistry().hasZone(zone)) { throw new IllegalArgumentException("Zone " + zone + " does not exist in this system"); } return zone; } private static Map<String, byte[]> parseDataParts(HttpRequest request) { String contentHash = request.getHeader("X-Content-Hash"); if (contentHash == null) return new MultipartParser().parse(request); DigestInputStream digester = Signatures.sha256Digester(request.getData()); var dataParts = new MultipartParser().parse(request.getHeader("Content-Type"), digester, request.getUri()); if ( ! 
Arrays.equals(digester.getMessageDigest().digest(), Base64.getDecoder().decode(contentHash))) throw new IllegalArgumentException("Value of X-Content-Hash header does not match computed content hash"); return dataParts; } private static RotationId findRotationId(Instance instance, Optional<String> endpointId) { if (instance.rotations().isEmpty()) { throw new NotExistsException("global rotation does not exist for " + instance); } if (endpointId.isPresent()) { return instance.rotations().stream() .filter(r -> r.endpointId().id().equals(endpointId.get())) .map(AssignedRotation::rotationId) .findFirst() .orElseThrow(() -> new NotExistsException("endpoint " + endpointId.get() + " does not exist for " + instance)); } else if (instance.rotations().size() > 1) { throw new IllegalArgumentException(instance + " has multiple rotations. Query parameter 'endpointId' must be given"); } return instance.rotations().get(0).rotationId(); } private static String rotationStateString(RotationState state) { return switch (state) { case in: yield "IN"; case out: yield "OUT"; case unknown: yield "UNKNOWN"; }; } private static String endpointScopeString(Endpoint.Scope scope) { return switch (scope) { case weighted: yield "weighted"; case application: yield "application"; case global: yield "global"; case zone: yield "zone"; }; } private static String routingMethodString(RoutingMethod method) { return switch (method) { case exclusive: yield "exclusive"; case sharedLayer4: yield "sharedLayer4"; }; } private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> cls) { return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName)) .filter(cls::isInstance) .map(cls::cast) .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request")); } /** Returns whether given request is by an operator */ private static boolean isOperator(HttpRequest request) { var securityContext = getAttribute(request, 
SecurityContext.ATTRIBUTE_NAME, SecurityContext.class); return securityContext.roles().stream() .map(Role::definition) .anyMatch(definition -> definition == RoleDefinition.hostedOperator); } private boolean hasOktaContext(HttpRequest request) { try { OAuthCredentials.fromOktaRequestContext(request.getJDiscRequest().context()); return true; } catch (IllegalArgumentException e) { return false; } } private List<Deployment> sortedDeployments(Collection<Deployment> deployments, DeploymentInstanceSpec spec) { List<ZoneId> productionZones = spec.zones().stream() .filter(z -> z.region().isPresent()) .map(z -> ZoneId.from(z.environment(), z.region().get())) .toList(); return deployments.stream() .sorted(comparingInt(deployment -> productionZones.indexOf(deployment.zone()))) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } }
This is the log line to look for.
/**
 * Creates a ZooKeeper server wired to the given transaction log/snapshot factory and
 * in-memory database, with explicit session-timeout bounds and client listen backlog.
 *
 * @param txnLogFactory          factory for the transaction log and snapshot files
 * @param tickTime               basic server time unit in milliseconds
 * @param minSessionTimeout      lower bound for negotiated session timeouts (-1 = use default)
 * @param maxSessionTimeout      upper bound for negotiated session timeouts (-1 = use default)
 * @param clientPortListenBacklog socket listen backlog for the client port (-1 = unset)
 * @param zkDb                   the in-memory data tree / session database
 * @param initialConfig          configuration file content read at startup
 * @param reconfigEnabled        whether dynamic reconfiguration is enabled
 */
public ZooKeeperServer(FileTxnSnapLog txnLogFactory, int tickTime, int minSessionTimeout, int maxSessionTimeout, int clientPortListenBacklog, ZKDatabase zkDb, String initialConfig, boolean reconfigEnabled) {
    // Stats object must exist before it is handed to the txn log factory below.
    serverStats = new ServerStats(this);
    this.txnLogFactory = txnLogFactory;
    this.txnLogFactory.setServerStats(this.serverStats);
    this.zkDb = zkDb;
    this.tickTime = tickTime;
    // Setters rather than plain assignment — presumably they normalize the -1 "unset"
    // sentinel against tickTime-derived defaults; confirm in setMinSessionTimeout/setMaxSessionTimeout.
    setMinSessionTimeout(minSessionTimeout);
    setMaxSessionTimeout(maxSessionTimeout);
    this.listenBacklog = clientPortListenBacklog;
    this.reconfigEnabled = reconfigEnabled;
    listener = new ZooKeeperServerListenerImpl(this);
    // Response caches are sized via system properties, falling back to the library default.
    readResponseCache = new ResponseCache(Integer.getInteger(
        GET_DATA_RESPONSE_CACHE_SIZE,
        ResponseCache.DEFAULT_RESPONSE_CACHE_SIZE), "getData");
    getChildrenResponseCache = new ResponseCache(Integer.getInteger(
        GET_CHILDREN_RESPONSE_CACHE_SIZE,
        ResponseCache.DEFAULT_RESPONSE_CACHE_SIZE), "getChildren");
    this.initialConfig = initialConfig;
    this.requestPathMetricsCollector = new RequestPathMetricsCollector();
    this.initLargeRequestThrottlingSettings();
    this.authHelper = new AuthenticationHelper();
    // Startup summary; the "Created patched server with" prefix distinguishes this build's
    // log line from the stock ZooKeeper constructor message — do not change the literal.
    LOG.info(
        "Created patched server with"
            + " tickTime {} ms"
            + " minSessionTimeout {} ms"
            + " maxSessionTimeout {} ms"
            + " clientPortListenBacklog {}"
            + " datadir {}"
            + " snapdir {}",
        tickTime,
        getMinSessionTimeout(),
        getMaxSessionTimeout(),
        getClientPortListenBacklog(),
        txnLogFactory.getDataDir(),
        txnLogFactory.getSnapDir());
}
"Created patched server with"
/**
 * Constructs the server around an existing {@code ZKDatabase} and txn-log factory.
 *
 * <p>Ordering note: {@code serverStats} is built first and then registered on the
 * txn-log factory; the session-timeout bounds go through their setters instead of
 * direct field writes.
 *
 * @param txnLogFactory           source of the transaction log and snapshot directories
 * @param tickTime                server tick interval, milliseconds
 * @param minSessionTimeout       minimum negotiable session timeout; -1 means "use default"
 * @param maxSessionTimeout       maximum negotiable session timeout; -1 means "use default"
 * @param clientPortListenBacklog TCP accept backlog for the client port; -1 means unset
 * @param zkDb                    database holding the data tree and sessions
 * @param initialConfig           raw configuration text captured at startup
 * @param reconfigEnabled         true if dynamic reconfig is allowed
 */
public ZooKeeperServer(FileTxnSnapLog txnLogFactory, int tickTime, int minSessionTimeout, int maxSessionTimeout, int clientPortListenBacklog, ZKDatabase zkDb, String initialConfig, boolean reconfigEnabled) {
    serverStats = new ServerStats(this);  // created before being handed to the factory
    this.txnLogFactory = txnLogFactory;
    this.txnLogFactory.setServerStats(this.serverStats);
    this.zkDb = zkDb;
    this.tickTime = tickTime;
    setMinSessionTimeout(minSessionTimeout);
    setMaxSessionTimeout(maxSessionTimeout);
    this.listenBacklog = clientPortListenBacklog;
    this.reconfigEnabled = reconfigEnabled;
    listener = new ZooKeeperServerListenerImpl(this);
    // Cache sizes come from system properties with a library-default fallback.
    readResponseCache = new ResponseCache(Integer.getInteger(
        GET_DATA_RESPONSE_CACHE_SIZE,
        ResponseCache.DEFAULT_RESPONSE_CACHE_SIZE), "getData");
    getChildrenResponseCache = new ResponseCache(Integer.getInteger(
        GET_CHILDREN_RESPONSE_CACHE_SIZE,
        ResponseCache.DEFAULT_RESPONSE_CACHE_SIZE), "getChildren");
    this.initialConfig = initialConfig;
    this.requestPathMetricsCollector = new RequestPathMetricsCollector();
    this.initLargeRequestThrottlingSettings();
    this.authHelper = new AuthenticationHelper();
    // SLF4J parameterized startup line; "Created patched server with" is the marker
    // string referenced elsewhere — keep the literal byte-for-byte.
    LOG.info(
        "Created patched server with"
            + " tickTime {} ms"
            + " minSessionTimeout {} ms"
            + " maxSessionTimeout {} ms"
            + " clientPortListenBacklog {}"
            + " datadir {}"
            + " snapdir {}",
        tickTime,
        getMinSessionTimeout(),
        getMaxSessionTimeout(),
        getClientPortListenBacklog(),
        txnLogFactory.getDataDir(),
        txnLogFactory.getSnapDir());
}
/**
 * Standalone ZooKeeper server. Server-wide flags and defaults are read once
 * from system properties in the static initializers below and are shared by
 * all instances.
 */
class ZooKeeperServer implements SessionExpirer, ServerStats.Provider {

    protected static final Logger LOG;
    private static final RateLogger RATE_LOGGER;

    // System-property names controlling server-wide behaviour.
    public static final String GLOBAL_OUTSTANDING_LIMIT = "zookeeper.globalOutstandingLimit";
    public static final String ENABLE_EAGER_ACL_CHECK = "zookeeper.enableEagerACLCheck";
    public static final String SKIP_ACL = "zookeeper.skipACL";
    public static final String ENFORCE_QUOTA = "zookeeper.enforceQuota";

    // Values resolved once from the properties above (see static block).
    static final boolean enableEagerACLCheck;
    static final boolean skipACL;
    public static final boolean enforceQuota;

    public static final String SASL_SUPER_USER = "zookeeper.superUser";
    public static final String ALLOW_SASL_FAILED_CLIENTS = "zookeeper.allowSaslFailedClients";

    public static final String ZOOKEEPER_DIGEST_ENABLED = "zookeeper.digest.enabled";
    private static boolean digestEnabled;

    public static final String CLOSE_SESSION_TXN_ENABLED = "zookeeper.closeSessionTxn.enabled";
    private static boolean closeSessionTxnEnabled = true;

    static {
        LOG = LoggerFactory.getLogger(ZooKeeperServer.class);
        RATE_LOGGER = new RateLogger(LOG);

        ZookeeperBanner.printBanner(LOG);
        Environment.logEnv("Server environment:", LOG);

        enableEagerACLCheck = Boolean.getBoolean(ENABLE_EAGER_ACL_CHECK);
        LOG.info("{} = {}", ENABLE_EAGER_ACL_CHECK, enableEagerACLCheck);

        // Note: only the literal value "yes" (not "true") disables ACL checks.
        skipACL = System.getProperty(SKIP_ACL, "no").equals("yes");
        if (skipACL) {
            LOG.info("{}==\"yes\", ACL checks will be skipped", SKIP_ACL);
        }

        enforceQuota = Boolean.parseBoolean(System.getProperty(ENFORCE_QUOTA, "false"));
        if (enforceQuota) {
            LOG.info("{} = {}, Quota Enforce enables", ENFORCE_QUOTA, enforceQuota);
        }

        digestEnabled = Boolean.parseBoolean(System.getProperty(ZOOKEEPER_DIGEST_ENABLED, "true"));
        LOG.info("{} = {}", ZOOKEEPER_DIGEST_ENABLED, digestEnabled);

        closeSessionTxnEnabled = Boolean.parseBoolean(
                System.getProperty(CLOSE_SESSION_TXN_ENABLED, "true"));
        LOG.info("{} = {}", CLOSE_SESSION_TXN_ENABLED, closeSessionTxnEnabled);
    }

    public static boolean isCloseSessionTxnEnabled() {
        return closeSessionTxnEnabled;
    }

    public static void setCloseSessionTxnEnabled(boolean enabled) {
        ZooKeeperServer.closeSessionTxnEnabled = enabled;
        LOG.info("Update {} to {}", CLOSE_SESSION_TXN_ENABLED, ZooKeeperServer.closeSessionTxnEnabled);
    }

    // JMX beans registered/unregistered over the server lifecycle.
    protected ZooKeeperServerBean jmxServerBean;
    protected DataTreeBean jmxDataTreeBean;

    public static final int DEFAULT_TICK_TIME = 3000;
    protected int tickTime = DEFAULT_TICK_TIME;

    public static final int DEFAULT_THROTTLED_OP_WAIT_TIME = 0;
    protected static volatile int throttledOpWaitTime = Integer.getInteger("zookeeper.throttled_op_wait_time", DEFAULT_THROTTLED_OP_WAIT_TIME);

    /** value of -1 indicates unset; the setter then defaults to tickTime * 2 */
    protected int minSessionTimeout = -1;
    /** value of -1 indicates unset; the setter then defaults to tickTime * 20 */
    protected int maxSessionTimeout = -1;
    /** Socket listen backlog. Value of -1 indicates unset */
    protected int listenBacklog = -1;

    protected SessionTracker sessionTracker;
    private FileTxnSnapLog txnLogFactory = null;
    private ZKDatabase zkDb;
    private ResponseCache readResponseCache;
    private ResponseCache getChildrenResponseCache;
    // Highest zxid handed out by this server; see getNextZxid()/setZxid().
    private final AtomicLong hzxid = new AtomicLong(0);
    public static final Exception ok = new Exception("No prob");
    protected RequestProcessor firstProcessor;
    protected JvmPauseMonitor jvmPauseMonitor;
    protected volatile State state = State.INITIAL;
    private boolean isResponseCachingEnabled = true;
    /* contains the configuration file content read at startup */
    protected String initialConfig;
    protected boolean reconfigEnabled;
    private final RequestPathMetricsCollector requestPathMetricsCollector;

    private static final int DEFAULT_SNAP_COUNT = 100000;
    private static final int DEFAULT_GLOBAL_OUTSTANDING_LIMIT = 1000;

    private boolean localSessionEnabled = false;

    /** Server lifecycle; see the state-transition notes on setState. */
    protected enum State {
        INITIAL,
        RUNNING,
        SHUTDOWN,
        ERROR
    }

    /**
     * This is the secret that we use to generate passwords. For the moment,
     * it's more of a checksum that's used in reconnection, which carries no
     * security weight, and is treated internally as if it carries no
     * security weight.
     */
    private static final long superSecret = 0XB3415C00L;

    private final AtomicInteger requestsInProcess = new AtomicInteger(0);
    final Deque<ChangeRecord> outstandingChanges = new ArrayDeque<>();
    // this data structure must be accessed under the outstandingChanges lock
    final Map<String, ChangeRecord> outstandingChangesForPath = new HashMap<String, ChangeRecord>();

    protected ServerCnxnFactory serverCnxnFactory;
    protected ServerCnxnFactory secureServerCnxnFactory;

    private final ServerStats serverStats;
    private final ZooKeeperServerListener listener;
    private ZooKeeperServerShutdownHandler zkShutdownHandler;
    private volatile int createSessionTrackerServerId = 1;

    private static final String FLUSH_DELAY = "zookeeper.flushDelay";
    private static volatile long flushDelay;
    private static final String MAX_WRITE_QUEUE_POLL_SIZE = "zookeeper.maxWriteQueuePollTime";
    private static volatile long maxWriteQueuePollTime;
    private static final String MAX_BATCH_SIZE = "zookeeper.maxBatchSize";
    private static volatile int maxBatchSize;

    /**
     * Starting size of read and write ByteArrayOutputStream buffers. The
     * default is 1024 bytes (DEFAULT_STARTING_BUFFER_SIZE) and the enforced
     * minimum is 32 — see the static block below. Flag not used for small
     * transfers like connectResponses.
     */
    public static final String INT_BUFFER_STARTING_SIZE_BYTES = "zookeeper.intBufferStartingSizeBytes";
    public static final int DEFAULT_STARTING_BUFFER_SIZE = 1024;
    public static final int intBufferStartingSizeBytes;

    public static final String GET_DATA_RESPONSE_CACHE_SIZE = "zookeeper.maxResponseCacheSize";
    public static final String GET_CHILDREN_RESPONSE_CACHE_SIZE = "zookeeper.maxGetChildrenResponseCacheSize";

    static {
        long configuredFlushDelay = Long.getLong(FLUSH_DELAY, 0);
        setFlushDelay(configuredFlushDelay);
        // Default poll time is a third of the configured flush delay.
        setMaxWriteQueuePollTime(Long.getLong(MAX_WRITE_QUEUE_POLL_SIZE, configuredFlushDelay / 3));
        setMaxBatchSize(Integer.getInteger(MAX_BATCH_SIZE, 1000));

        intBufferStartingSizeBytes = Integer.getInteger(INT_BUFFER_STARTING_SIZE_BYTES, DEFAULT_STARTING_BUFFER_SIZE);
        if (intBufferStartingSizeBytes < 32) {
            String msg = "Buffer starting size (" + intBufferStartingSizeBytes
                         + ") must be greater than or equal to 32. "
                         + "Configure with \"-Dzookeeper.intBufferStartingSizeBytes=<size>\" ";
            LOG.error(msg);
            throw new IllegalArgumentException(msg);
        }
        LOG.info("{} = {}", INT_BUFFER_STARTING_SIZE_BYTES, intBufferStartingSizeBytes);
    }

    private BlueThrottle connThrottle = new BlueThrottle();

    private RequestThrottler requestThrottler;
    public static final String SNAP_COUNT = "zookeeper.snapCount";

    /**
     * This setting sets a limit on the total number of large requests that
     * can be inflight and is designed to prevent ZooKeeper from accepting
     * too many large requests such that the JVM runs out of usable heap and
     * ultimately crashes.
     *
     * The limit is enforced by the checkRequestSize(int, boolean) method
     * which is called by the connection layer ({@link NIOServerCnxn},
     * {@link NettyServerCnxn}) before allocating a byte buffer and pulling
     * data off the TCP socket. The limit is then checked again by the
     * ZooKeeper server in processPacket(ServerCnxn, ByteBuffer), which
     * also atomically updates {@link #currentLargeRequestBytes}. The request
     * is then marked as a large request, with the request size stored in the
     * Request object so that it can later be decremented from
     * {@link #currentLargeRequestBytes}.
     *
     * When a request is completed or dropped, the relevant code path calls
     * the requestFinished(Request) method which performs the decrement if
     * needed.
     */
    private volatile int largeRequestMaxBytes = 100 * 1024 * 1024;

    /**
     * The size threshold after which a request is considered a large request
     * and is checked against the large request byte limit.
     */
    private volatile int largeRequestThreshold = -1;

    private final AtomicInteger currentLargeRequestBytes = new AtomicInteger(0);

    private AuthenticationHelper authHelper;

    void removeCnxn(ServerCnxn cnxn) {
        zkDb.removeCnxn(cnxn);
    }

    /**
     * Creates a ZooKeeperServer instance. Nothing is setup, use the setX
     * methods to prepare the instance (eg datadir, datalogdir, ticktime,
     * builder, etc...)
     */
    public ZooKeeperServer() {
        listener = new ZooKeeperServerListenerImpl(this);
        serverStats = new ServerStats(this);
        this.requestPathMetricsCollector = new RequestPathMetricsCollector();
        this.authHelper = new AuthenticationHelper();
    }

    /**
     * Keeping this constructor for backward compatibility; the reconfig flag
     * defaults to QuorumPeerConfig.isReconfigEnabled().
     */
    public ZooKeeperServer(FileTxnSnapLog txnLogFactory, int tickTime, int minSessionTimeout, int maxSessionTimeout, int clientPortListenBacklog, ZKDatabase zkDb, String initialConfig) {
        this(txnLogFactory, tickTime, minSessionTimeout, maxSessionTimeout, clientPortListenBacklog, zkDb, initialConfig, QuorumPeerConfig.isReconfigEnabled());
    }

    /** Returns the configuration file content read at startup. */
    public String getInitialConfig() {
        return initialConfig;
    }

    /**
     * Adds a JvmPauseMonitor on top of the standard construction.
     * NOTE(review): the original {@code @link} target here was garbled in
     * formatting; presumably it pointed at the eight-argument constructor —
     * confirm against upstream.
     */
    public ZooKeeperServer(JvmPauseMonitor jvmPauseMonitor, FileTxnSnapLog txnLogFactory, int tickTime, int minSessionTimeout, int maxSessionTimeout, int clientPortListenBacklog, ZKDatabase zkDb, String initialConfig) {
        this(txnLogFactory, tickTime, minSessionTimeout, maxSessionTimeout, clientPortListenBacklog, zkDb, initialConfig, QuorumPeerConfig.isReconfigEnabled());
        this.jvmPauseMonitor = jvmPauseMonitor;
        if (jvmPauseMonitor != null) {
            LOG.info("Added JvmPauseMonitor to server");
        }
    }

    /**
     * creates a zookeeperserver instance.
     * @param txnLogFactory the file transaction snapshot logging class
     * @param tickTime the ticktime for the server
     * @param initialConfig configuration file content read at startup
     */
    public ZooKeeperServer(FileTxnSnapLog txnLogFactory, int tickTime, String initialConfig) {
        this(txnLogFactory, tickTime, -1, -1, -1, new ZKDatabase(txnLogFactory), initialConfig, QuorumPeerConfig.isReconfigEnabled());
    }

    public ServerStats serverStats() {
        return serverStats;
    }

    public RequestPathMetricsCollector getRequestPathMetricsCollector() {
        return requestPathMetricsCollector;
    }

    public BlueThrottle connThrottle() {
        return connThrottle;
    }

    /** Writes the effective server configuration, one key=value per line. */
    public void dumpConf(PrintWriter pwriter) {
        pwriter.print("clientPort=");
        pwriter.println(getClientPort());
        pwriter.print("secureClientPort=");
        pwriter.println(getSecureClientPort());
        // NOTE(review): "dataDir" is printed from snapLog.getSnapDir() and
        // "dataLogDir" from snapLog.getDataDir() — FileTxnSnapLog's naming is
        // inverted relative to the config keys; confirm before "fixing".
        pwriter.print("dataDir=");
        pwriter.println(zkDb.snapLog.getSnapDir().getAbsolutePath());
        pwriter.print("dataDirSize=");
        pwriter.println(getDataDirSize());
        pwriter.print("dataLogDir=");
        pwriter.println(zkDb.snapLog.getDataDir().getAbsolutePath());
        pwriter.print("dataLogSize=");
        pwriter.println(getLogDirSize());
        pwriter.print("tickTime=");
        pwriter.println(getTickTime());
        pwriter.print("maxClientCnxns=");
        pwriter.println(getMaxClientCnxnsPerHost());
        pwriter.print("minSessionTimeout=");
        pwriter.println(getMinSessionTimeout());
        pwriter.print("maxSessionTimeout=");
        pwriter.println(getMaxSessionTimeout());
        pwriter.print("clientPortListenBacklog=");
        pwriter.println(getClientPortListenBacklog());
        pwriter.print("serverId=");
        pwriter.println(getServerId());
    }

    public ZooKeeperServerConf getConf() {
        return new ZooKeeperServerConf(
            getClientPort(),
            zkDb.snapLog.getSnapDir().getAbsolutePath(),
            zkDb.snapLog.getDataDir().getAbsolutePath(),
            getTickTime(),
            getMaxClientCnxnsPerHost(),
            getMinSessionTimeout(),
            getMaxSessionTimeout(),
            getServerId(),
            getClientPortListenBacklog());
    }

    /**
     * This constructor is for backward compatibility with the existing unit
     * test code.
     * It defaults to FileLogProvider persistence provider.
     */
    public ZooKeeperServer(File snapDir, File logDir, int tickTime) throws IOException {
        this(new FileTxnSnapLog(snapDir, logDir), tickTime, "");
    }

    /**
     * Default constructor, relies on the config for its argument values
     *
     * @throws IOException if the ZKDatabase cannot be created from the txn log
     */
    public ZooKeeperServer(FileTxnSnapLog txnLogFactory) throws IOException {
        this(txnLogFactory, DEFAULT_TICK_TIME, -1, -1, -1, new ZKDatabase(txnLogFactory), "", QuorumPeerConfig.isReconfigEnabled());
    }

    /**
     * get the zookeeper database for this server
     * @return the zookeeper database for this server
     */
    public ZKDatabase getZKDatabase() {
        return this.zkDb;
    }

    /**
     * set the zkdatabase for this zookeeper server
     * @param zkDb the database to use
     */
    public void setZKDatabase(ZKDatabase zkDb) {
        this.zkDb = zkDb;
    }

    /**
     * Restore sessions and data
     */
    public void loadData() throws IOException, InterruptedException {
        /*
         * When a new leader starts executing, the Leader invokes this method.
         * The database, however, has been initialized before running leader
         * election so that the server could pick its zxid for its initial
         * vote. It does it by invoking QuorumPeer. Consequently, we don't
         * need to initialize it once more and avoid the penalty of loading
         * it a second time. Not reloading it is particularly important for
         * applications that host a large database.
         *
         * The following if block checks whether the database has been
         * initialized or not. Note that this method is invoked by at least
         * one other method (ZooKeeperServer startup).
         *
         * (NOTE(review): the {@code @link} targets in this comment were lost
         * in formatting.) See ZOOKEEPER-1642 for more detail.
         */
        if (zkDb.isInitialized()) {
            setZxid(zkDb.getDataTreeLastProcessedZxid());
        } else {
            setZxid(zkDb.loadDataBase());
        }

        // Kill any session that is present in the tree but has no recorded
        // timeout (i.e. it is no longer tracked as live).
        zkDb.getSessions().stream()
            .filter(session -> zkDb.getSessionWithTimeOuts().get(session) == null)
            .forEach(session -> killSession(session, zkDb.getDataTreeLastProcessedZxid()));

        // Make a clean snapshot after restoring.
        takeSnapshot();
    }

    public void takeSnapshot() {
        takeSnapshot(false);
    }

    /**
     * Serializes the data tree and session table to disk.
     *
     * @param syncSnap whether to fsync the snapshot after writing it
     */
    public void takeSnapshot(boolean syncSnap) {
        long start = Time.currentElapsedTime();
        try {
            txnLogFactory.save(zkDb.getDataTree(), zkDb.getSessionWithTimeOuts(), syncSnap);
        } catch (IOException e) {
            // A failed snapshot is unrecoverable: request process exit.
            LOG.error("Severe unrecoverable error, exiting", e);
            ServiceUtils.requestSystemExit(ExitCode.TXNLOG_ERROR_TAKING_SNAPSHOT.getValue());
        }
        long elapsed = Time.currentElapsedTime() - start;
        LOG.info("Snapshot taken in {} ms", elapsed);
        ServerMetrics.getMetrics().SNAPSHOT_TIME.add(elapsed);
    }

    public boolean shouldForceWriteInitialSnapshotAfterLeaderElection() {
        return txnLogFactory.shouldForceWriteInitialSnapshotAfterLeaderElection();
    }

    @Override
    public long getDataDirSize() {
        if (zkDb == null) {
            return 0L;
        }
        // NOTE(review): computed over snapLog.getDataDir(); see the naming
        // inversion noted in dumpConf — confirm against FileTxnSnapLog.
        File path = zkDb.snapLog.getDataDir();
        return getDirSize(path);
    }

    @Override
    public long getLogDirSize() {
        if (zkDb == null) {
            return 0L;
        }
        File path = zkDb.snapLog.getSnapDir();
        return getDirSize(path);
    }

    /** Recursively sums the size in bytes of a file or directory tree. */
    private long getDirSize(File file) {
        long size = 0L;
        if (file.isDirectory()) {
            File[] files = file.listFiles();
            if (files != null) {
                for (File f : files) {
                    size += getDirSize(f);
                }
            }
        } else {
            size = file.length();
        }
        return size;
    }

    public long getZxid() {
        return hzxid.get();
    }

    public SessionTracker getSessionTracker() {
        return sessionTracker;
    }

    long getNextZxid() {
        return hzxid.incrementAndGet();
    }

    public void setZxid(long zxid) {
        hzxid.set(zxid);
    }

    /** Submits a closeSession request for the given session id. */
    private void close(long sessionId) {
        Request si = new Request(null, sessionId, 0, OpCode.closeSession, null, null);
        submitRequest(si);
    }
public void closeSession(long sessionId) { LOG.info("Closing session 0x{}", Long.toHexString(sessionId)); close(sessionId); } protected void killSession(long sessionId, long zxid) { zkDb.killSession(sessionId, zxid); if (LOG.isTraceEnabled()) { ZooTrace.logTraceMessage( LOG, ZooTrace.SESSION_TRACE_MASK, "ZooKeeperServer --- killSession: 0x" + Long.toHexString(sessionId)); } if (sessionTracker != null) { sessionTracker.removeSession(sessionId); } } public void expire(Session session) { long sessionId = session.getSessionId(); LOG.info( "Expiring session 0x{}, timeout of {}ms exceeded", Long.toHexString(sessionId), session.getTimeout()); close(sessionId); } public void expire(long sessionId) { LOG.info("forcibly expiring session 0x{}", Long.toHexString(sessionId)); close(sessionId); } public static class MissingSessionException extends IOException { private static final long serialVersionUID = 7467414635467261007L; public MissingSessionException(String msg) { super(msg); } } void touch(ServerCnxn cnxn) throws MissingSessionException { if (cnxn == null) { return; } long id = cnxn.getSessionId(); int to = cnxn.getSessionTimeout(); if (!sessionTracker.touchSession(id, to)) { throw new MissingSessionException("No session with sessionid 0x" + Long.toHexString(id) + " exists, probably expired and removed"); } } protected void registerJMX() { try { jmxServerBean = new ZooKeeperServerBean(this); MBeanRegistry.getInstance().register(jmxServerBean, null); try { jmxDataTreeBean = new DataTreeBean(zkDb.getDataTree()); MBeanRegistry.getInstance().register(jmxDataTreeBean, jmxServerBean); } catch (Exception e) { LOG.warn("Failed to register with JMX", e); jmxDataTreeBean = null; } } catch (Exception e) { LOG.warn("Failed to register with JMX", e); jmxServerBean = null; } } public void startdata() throws IOException, InterruptedException { if (zkDb == null) { zkDb = new ZKDatabase(this.txnLogFactory); } if (!zkDb.isInitialized()) { loadData(); } } public synchronized void startup() 
{ startupWithServerState(State.RUNNING); } public synchronized void startupWithoutServing() { startupWithServerState(State.INITIAL); } public synchronized void startServing() { setState(State.RUNNING); notifyAll(); } private void startupWithServerState(State state) { if (sessionTracker == null) { createSessionTracker(); } startSessionTracker(); setupRequestProcessors(); startRequestThrottler(); registerJMX(); startJvmPauseMonitor(); registerMetrics(); setState(state); requestPathMetricsCollector.start(); localSessionEnabled = sessionTracker.isLocalSessionsEnabled(); notifyAll(); } protected void startJvmPauseMonitor() { if (this.jvmPauseMonitor != null) { this.jvmPauseMonitor.serviceStart(); } } protected void startRequestThrottler() { requestThrottler = new RequestThrottler(this); requestThrottler.start(); } protected void setupRequestProcessors() { RequestProcessor finalProcessor = new FinalRequestProcessor(this); RequestProcessor syncProcessor = new SyncRequestProcessor(this, finalProcessor); ((SyncRequestProcessor) syncProcessor).start(); firstProcessor = new PrepRequestProcessor(this, syncProcessor); ((PrepRequestProcessor) firstProcessor).start(); } public ZooKeeperServerListener getZooKeeperServerListener() { return listener; } /** * Change the server ID used by {@link * {@link * * @param newId ID to use */ public void setCreateSessionTrackerServerId(int newId) { createSessionTrackerServerId = newId; } protected void createSessionTracker() { sessionTracker = new SessionTrackerImpl(this, zkDb.getSessionWithTimeOuts(), tickTime, createSessionTrackerServerId, getZooKeeperServerListener()); } protected void startSessionTracker() { ((SessionTrackerImpl) sessionTracker).start(); } /** * Sets the state of ZooKeeper server. After changing the state, it notifies * the server state change to a registered shutdown handler, if any. 
* <p> * The following are the server state transitions: * <ul><li>During startup the server will be in the INITIAL state.</li> * <li>After successfully starting, the server sets the state to RUNNING. * </li> * <li>The server transitions to the ERROR state if it hits an internal * error. {@link ZooKeeperServerListenerImpl} notifies any critical resource * error events, e.g., SyncRequestProcessor not being able to write a txn to * disk.</li> * <li>During shutdown the server sets the state to SHUTDOWN, which * corresponds to the server not running.</li></ul> * * @param state new server state. */ protected void setState(State state) { this.state = state; if (zkShutdownHandler != null) { zkShutdownHandler.handle(state); } else { LOG.debug( "ZKShutdownHandler is not registered, so ZooKeeper server" + " won't take any action on ERROR or SHUTDOWN server state changes"); } } /** * This can be used while shutting down the server to see whether the server * is already shutdown or not. * * @return true if the server is running or server hits an error, false * otherwise. */ protected boolean canShutdown() { return state == State.RUNNING || state == State.ERROR; } /** * @return true if the server is running, false otherwise. 
*/ public boolean isRunning() { return state == State.RUNNING; } public void shutdown() { shutdown(false); } /** * Shut down the server instance * @param fullyShutDown true if another server using the same database will not replace this one in the same process */ public synchronized void shutdown(boolean fullyShutDown) { if (!canShutdown()) { if (fullyShutDown && zkDb != null) { zkDb.clear(); } LOG.debug("ZooKeeper server is not running, so not proceeding to shutdown!"); return; } LOG.info("shutting down"); setState(State.SHUTDOWN); unregisterMetrics(); if (requestThrottler != null) { requestThrottler.shutdown(); } if (sessionTracker != null) { sessionTracker.shutdown(); } if (firstProcessor != null) { firstProcessor.shutdown(); } if (jvmPauseMonitor != null) { jvmPauseMonitor.serviceStop(); } if (zkDb != null) { if (fullyShutDown) { zkDb.clear(); } else { try { zkDb.fastForwardDataBase(); } catch (IOException e) { LOG.error("Error updating DB", e); zkDb.clear(); } } } requestPathMetricsCollector.shutdown(); unregisterJMX(); } protected void unregisterJMX() { try { if (jmxDataTreeBean != null) { MBeanRegistry.getInstance().unregister(jmxDataTreeBean); } } catch (Exception e) { LOG.warn("Failed to unregister with JMX", e); } try { if (jmxServerBean != null) { MBeanRegistry.getInstance().unregister(jmxServerBean); } } catch (Exception e) { LOG.warn("Failed to unregister with JMX", e); } jmxServerBean = null; jmxDataTreeBean = null; } public void incInProcess() { requestsInProcess.incrementAndGet(); } public void decInProcess() { requestsInProcess.decrementAndGet(); if (requestThrottler != null) { requestThrottler.throttleWake(); } } public int getInProcess() { return requestsInProcess.get(); } public int getInflight() { return requestThrottleInflight(); } private int requestThrottleInflight() { if (requestThrottler != null) { return requestThrottler.getInflight(); } return 0; } static class PrecalculatedDigest { final long nodeDigest; final long treeDigest; 
PrecalculatedDigest(long nodeDigest, long treeDigest) { this.nodeDigest = nodeDigest; this.treeDigest = treeDigest; } } /** * This structure is used to facilitate information sharing between PrepRP * and FinalRP. */ static class ChangeRecord { PrecalculatedDigest precalculatedDigest; byte[] data; ChangeRecord(long zxid, String path, StatPersisted stat, int childCount, List<ACL> acl) { this.zxid = zxid; this.path = path; this.stat = stat; this.childCount = childCount; this.acl = acl; } long zxid; String path; StatPersisted stat; /* Make sure to create a new object when changing */ int childCount; List<ACL> acl; /* Make sure to create a new object when changing */ ChangeRecord duplicate(long zxid) { StatPersisted stat = new StatPersisted(); if (this.stat != null) { DataTree.copyStatPersisted(this.stat, stat); } ChangeRecord changeRecord = new ChangeRecord(zxid, path, stat, childCount, acl == null ? new ArrayList<>() : new ArrayList<>(acl)); changeRecord.precalculatedDigest = precalculatedDigest; changeRecord.data = data; return changeRecord; } } byte[] generatePasswd(long id) { Random r = new Random(id ^ superSecret); byte[] p = new byte[16]; r.nextBytes(p); return p; } protected boolean checkPasswd(long sessionId, byte[] passwd) { return sessionId != 0 && Arrays.equals(passwd, generatePasswd(sessionId)); } long createSession(ServerCnxn cnxn, byte[] passwd, int timeout) { if (passwd == null) { passwd = new byte[0]; } long sessionId = sessionTracker.createSession(timeout); Random r = new Random(sessionId ^ superSecret); r.nextBytes(passwd); ByteBuffer to = ByteBuffer.allocate(4); to.putInt(timeout); cnxn.setSessionId(sessionId); Request si = new Request(cnxn, sessionId, 0, OpCode.createSession, to, null); submitRequest(si); return sessionId; } /** * set the owner of this session as owner * @param id the session id * @param owner the owner of the session * @throws SessionExpiredException */ public void setOwner(long id, Object owner) throws SessionExpiredException { 
sessionTracker.setOwner(id, owner); } protected void revalidateSession(ServerCnxn cnxn, long sessionId, int sessionTimeout) throws IOException { boolean rc = sessionTracker.touchSession(sessionId, sessionTimeout); if (LOG.isTraceEnabled()) { ZooTrace.logTraceMessage( LOG, ZooTrace.SESSION_TRACE_MASK, "Session 0x" + Long.toHexString(sessionId) + " is valid: " + rc); } finishSessionInit(cnxn, rc); } public void reopenSession(ServerCnxn cnxn, long sessionId, byte[] passwd, int sessionTimeout) throws IOException { if (checkPasswd(sessionId, passwd)) { revalidateSession(cnxn, sessionId, sessionTimeout); } else { LOG.warn( "Incorrect password from {} for session 0x{}", cnxn.getRemoteSocketAddress(), Long.toHexString(sessionId)); finishSessionInit(cnxn, false); } } public void finishSessionInit(ServerCnxn cnxn, boolean valid) { try { if (valid) { if (serverCnxnFactory != null && serverCnxnFactory.cnxns.contains(cnxn)) { serverCnxnFactory.registerConnection(cnxn); } else if (secureServerCnxnFactory != null && secureServerCnxnFactory.cnxns.contains(cnxn)) { secureServerCnxnFactory.registerConnection(cnxn); } } } catch (Exception e) { LOG.warn("Failed to register with JMX", e); } try { ConnectResponse rsp = new ConnectResponse( 0, valid ? cnxn.getSessionTimeout() : 0, valid ? cnxn.getSessionId() : 0, valid ? 
generatePasswd(cnxn.getSessionId()) : new byte[16]); ByteArrayOutputStream baos = new ByteArrayOutputStream(); BinaryOutputArchive bos = BinaryOutputArchive.getArchive(baos); bos.writeInt(-1, "len"); rsp.serialize(bos, "connect"); if (!cnxn.isOldClient) { bos.writeBool(this instanceof ReadOnlyZooKeeperServer, "readOnly"); } baos.close(); ByteBuffer bb = ByteBuffer.wrap(baos.toByteArray()); bb.putInt(bb.remaining() - 4).rewind(); cnxn.sendBuffer(bb); if (valid) { LOG.debug( "Established session 0x{} with negotiated timeout {} for client {}", Long.toHexString(cnxn.getSessionId()), cnxn.getSessionTimeout(), cnxn.getRemoteSocketAddress()); cnxn.enableRecv(); } else { LOG.info( "Invalid session 0x{} for client {}, probably expired", Long.toHexString(cnxn.getSessionId()), cnxn.getRemoteSocketAddress()); cnxn.sendBuffer(ServerCnxnFactory.closeConn); } } catch (Exception e) { LOG.warn("Exception while establishing session, closing", e); cnxn.close(ServerCnxn.DisconnectReason.IO_EXCEPTION_IN_SESSION_INIT); } } public void closeSession(ServerCnxn cnxn, RequestHeader requestHeader) { closeSession(cnxn.getSessionId()); } public long getServerId() { return 0; } /** * If the underlying Zookeeper server support local session, this method * will set a isLocalSession to true if a request is associated with * a local session. 
* @param si the request whose local-session flag may be set
*/
protected void setLocalSessionFlag(Request si) {
}

/** Submits a request to the processing pipeline via the request throttler. */
public void submitRequest(Request si) {
    enqueueRequest(si);
}

/**
 * Enqueues a request on the request throttler, blocking (in 1s waits) until the
 * server has left the INITIAL state if the throttler is not yet available.
 *
 * @throws RuntimeException if the server never started (throttler still null)
 */
public void enqueueRequest(Request si) {
    if (requestThrottler == null) {
        synchronized (this) {
            try {
                // Since all requests are passed to the request processor
                // it should wait for setting up the request processor chain.
                while (state == State.INITIAL) {
                    wait(1000);
                }
            } catch (InterruptedException e) {
                LOG.warn("Unexpected interruption", e);
            }
            if (requestThrottler == null) {
                throw new RuntimeException("Not started");
            }
        }
    }
    requestThrottler.submitRequest(si);
}

/**
 * Hands a request directly to the first processor in the chain, bypassing the
 * throttler. Waits for startup if the processor chain is not yet wired up.
 * Invalid packet types are routed to an UnimplementedRequestProcessor.
 */
public void submitRequestNow(Request si) {
    if (firstProcessor == null) {
        synchronized (this) {
            try {
                // Since all requests are passed to the request processor
                // it should wait for setting up the request processor chain.
                while (state == State.INITIAL) {
                    wait(1000);
                }
            } catch (InterruptedException e) {
                LOG.warn("Unexpected interruption", e);
            }
            if (firstProcessor == null || state != State.RUNNING) {
                throw new RuntimeException("Not started");
            }
        }
    }
    try {
        touch(si.cnxn);
        boolean validpacket = Request.isValid(si.type);
        if (validpacket) {
            setLocalSessionFlag(si);
            firstProcessor.processRequest(si);
            if (si.cnxn != null) {
                incInProcess();
            }
        } else {
            LOG.warn("Received packet at server of unknown type {}", si.type);
            // Update request accounting/throttling limits
            requestFinished(si);
            new UnimplementedRequestProcessor().processRequest(si);
        }
    } catch (MissingSessionException e) {
        LOG.debug("Dropping request.", e);
        // Update request accounting/throttling limits
        requestFinished(si);
    } catch (RequestProcessorException e) {
        LOG.error("Unable to process request", e);
        // Update request accounting/throttling limits
        requestFinished(si);
    }
}

/**
 * Reads the configured snapshot count, clamping values below 2 up to 2.
 */
public static int getSnapCount() {
    int snapCount = Integer.getInteger(SNAP_COUNT, DEFAULT_SNAP_COUNT);
    // snapCount must be 2 or more
    if (snapCount < 2) {
        LOG.warn("SnapCount should be 2 or more. Now, snapCount is reset to 2");
        snapCount = 2;
    }
    return snapCount;
}

public int getGlobalOutstandingLimit() {
    return Integer.getInteger(GLOBAL_OUTSTANDING_LIMIT, DEFAULT_GLOBAL_OUTSTANDING_LIMIT);
}

/**
 * Returns the snapshot size limit in bytes (configured in KB); a non-positive
 * configured value disables the feature.
 */
public static long getSnapSizeInBytes() {
    long size = Long.getLong("zookeeper.snapSizeLimitInKb", 4194304L); // 4GB by default
    if (size <= 0) {
        LOG.info("zookeeper.snapSizeLimitInKb set to a non-positive value {}; disabling feature", size);
    }
    return size * 1024;
}

public void setServerCnxnFactory(ServerCnxnFactory factory) {
    serverCnxnFactory = factory;
}

public ServerCnxnFactory getServerCnxnFactory() {
    return serverCnxnFactory;
}

public ServerCnxnFactory getSecureServerCnxnFactory() {
    return secureServerCnxnFactory;
}

public void setSecureServerCnxnFactory(ServerCnxnFactory factory) {
    secureServerCnxnFactory = factory;
}

/**
 * return the last processed id from the
 * datatree
 */
public long getLastProcessedZxid() {
    return zkDb.getDataTreeLastProcessedZxid();
}

/**
 * return the outstanding requests
 * in the queue, which haven't been
 * processed yet
 */
public long getOutstandingRequests() {
    return getInProcess();
}

/**
 * return the total number of client connections that are alive
 * to this server
 */
public int getNumAliveConnections() {
    int numAliveConnections = 0;
    if (serverCnxnFactory != null) {
        numAliveConnections += serverCnxnFactory.getNumAliveConnections();
    }
    if (secureServerCnxnFactory != null) {
        numAliveConnections += secureServerCnxnFactory.getNumAliveConnections();
    }
    return numAliveConnections;
}

/**
 * truncate the log to get in sync with others
 * if in a quorum
 * @param zxid the zxid that it needs to get in sync
 * with others
 * @throws IOException
 */
public void truncateLog(long zxid) throws IOException {
    this.zkDb.truncateLog(zxid);
}

public int getTickTime() {
    return tickTime;
}

public void setTickTime(int tickTime) {
    LOG.info("tickTime set to {} ms", tickTime);
    this.tickTime = tickTime;
}

public static int getThrottledOpWaitTime() {
    return throttledOpWaitTime;
}

public static void setThrottledOpWaitTime(int time) {
    LOG.info("throttledOpWaitTime set to {} ms", time);
    throttledOpWaitTime = time;
}

public int getMinSessionTimeout() {
    return minSessionTimeout;
}

public void setMinSessionTimeout(int min) {
    // -1 means "unset": default to twice the tick time.
    this.minSessionTimeout = min == -1 ? tickTime * 2 : min;
    LOG.info("minSessionTimeout set to {} ms", this.minSessionTimeout);
}

public int getMaxSessionTimeout() {
    return maxSessionTimeout;
}

public void setMaxSessionTimeout(int max) {
    // -1 means "unset": default to 20x the tick time.
    this.maxSessionTimeout = max == -1 ? tickTime * 20 : max;
    LOG.info("maxSessionTimeout set to {} ms", this.maxSessionTimeout);
}

public int getClientPortListenBacklog() {
    return listenBacklog;
}

public void setClientPortListenBacklog(int backlog) {
    this.listenBacklog = backlog;
    LOG.info("clientPortListenBacklog set to {}", backlog);
}

/** Returns the plaintext client port, or -1 if no plaintext factory is configured. */
public int getClientPort() {
    return serverCnxnFactory != null ? serverCnxnFactory.getLocalPort() : -1;
}

/** Returns the secure (TLS) client port, or -1 if no secure factory is configured. */
public int getSecureClientPort() {
    return secureServerCnxnFactory != null ? secureServerCnxnFactory.getLocalPort() : -1;
}

/** Maximum number of connections allowed from particular host (ip) */
public int getMaxClientCnxnsPerHost() {
    if (serverCnxnFactory != null) {
        return serverCnxnFactory.getMaxClientCnxnsPerHost();
    }
    if (secureServerCnxnFactory != null) {
        return secureServerCnxnFactory.getMaxClientCnxnsPerHost();
    }
    return -1;
}

public void setTxnLogFactory(FileTxnSnapLog txnLog) {
    this.txnLogFactory = txnLog;
}

public FileTxnSnapLog getTxnLogFactory() {
    return this.txnLogFactory;
}

/**
 * Returns the elapsed sync of time of transaction log in milliseconds.
 */
public long getTxnLogElapsedSyncTime() {
    return txnLogFactory.getTxnLogElapsedSyncTime();
}

public String getState() {
    return "standalone";
}

public void dumpEphemerals(PrintWriter pwriter) {
    zkDb.dumpEphemerals(pwriter);
}

public Map<Long, Set<String>> getEphemerals() {
    return zkDb.getEphemerals();
}

public double getConnectionDropChance() {
    return connThrottle.getDropChance();
}

/**
 * Handles an incoming client connect request: applies connection throttling,
 * validates the client's last-seen zxid against the local database, clamps the
 * requested session timeout into [min, max], and either creates a new session
 * (sessionId == 0) or revalidates/reopens an existing one.
 *
 * @param cnxn the server-side connection the request arrived on
 * @param incomingBuffer serialized ConnectRequest
 * @throws IOException on deserialization failure or refused session
 * @throws ClientCnxnLimitException if the connection throttler denies the request
 */
public void processConnectRequest(ServerCnxn cnxn, ByteBuffer incomingBuffer) throws IOException, ClientCnxnLimitException {
    BinaryInputArchive bia = BinaryInputArchive.getArchive(new ByteBufferInputStream(incomingBuffer));
    ConnectRequest connReq = new ConnectRequest();
    connReq.deserialize(bia, "connect");
    LOG.debug(
        "Session establishment request from client {} client's lastZxid is 0x{}",
        cnxn.getRemoteSocketAddress(),
        Long.toHexString(connReq.getLastZxidSeen()));

    long sessionId = connReq.getSessionId();
    int tokensNeeded = 1;
    if (connThrottle.isConnectionWeightEnabled()) {
        // Weighted throttling: new local/global sessions and renewals cost
        // different numbers of tokens.
        if (sessionId == 0) {
            if (localSessionEnabled) {
                tokensNeeded = connThrottle.getRequiredTokensForLocal();
            } else {
                tokensNeeded = connThrottle.getRequiredTokensForGlobal();
            }
        } else {
            tokensNeeded = connThrottle.getRequiredTokensForRenew();
        }
    }
    if (!connThrottle.checkLimit(tokensNeeded)) {
        throw new ClientCnxnLimitException();
    }
    ServerMetrics.getMetrics().CONNECTION_TOKEN_DEFICIT.add(connThrottle.getDeficit());
    ServerMetrics.getMetrics().CONNECTION_REQUEST_COUNT.add(1);

    boolean readOnly = false;
    try {
        readOnly = bia.readBool("readOnly");
        cnxn.isOldClient = false;
    } catch (IOException e) {
        // Pre-r/o-mode clients do not send the readOnly flag; treat the
        // deserialization failure as "old client".
        LOG.warn(
            "Connection request from old client {}; will be dropped if server is in r-o mode",
            cnxn.getRemoteSocketAddress());
    }
    if (!readOnly && this instanceof ReadOnlyZooKeeperServer) {
        String msg = "Refusing session request for not-read-only client " + cnxn.getRemoteSocketAddress();
        LOG.info(msg);
        throw new CloseRequestException(msg, ServerCnxn.DisconnectReason.NOT_READ_ONLY_CLIENT);
    }
    if (connReq.getLastZxidSeen() > zkDb.dataTree.lastProcessedZxid) {
        // The client has seen a newer state than this server holds; it must
        // reconnect to a more up-to-date server.
        String msg = "Refusing session request for client "
                     + cnxn.getRemoteSocketAddress()
                     + " as it has seen zxid 0x"
                     + Long.toHexString(connReq.getLastZxidSeen())
                     + " our last zxid is 0x"
                     + Long.toHexString(getZKDatabase().getDataTreeLastProcessedZxid())
                     + " client must try another server";
        LOG.info(msg);
        throw new CloseRequestException(msg, ServerCnxn.DisconnectReason.CLIENT_ZXID_AHEAD);
    }
    // Clamp the requested session timeout into the configured [min, max] range.
    int sessionTimeout = connReq.getTimeOut();
    byte[] passwd = connReq.getPasswd();
    int minSessionTimeout = getMinSessionTimeout();
    if (sessionTimeout < minSessionTimeout) {
        sessionTimeout = minSessionTimeout;
    }
    int maxSessionTimeout = getMaxSessionTimeout();
    if (sessionTimeout > maxSessionTimeout) {
        sessionTimeout = maxSessionTimeout;
    }
    cnxn.setSessionTimeout(sessionTimeout);
    // We don't want to receive any packets until we are sure that the
    // session is setup.
    cnxn.disableRecv();
    if (sessionId == 0) {
        long id = createSession(cnxn, passwd, sessionTimeout);
        LOG.debug(
            "Client attempting to establish new session: session = 0x{}, zxid = 0x{}, timeout = {}, address = {}",
            Long.toHexString(id),
            Long.toHexString(connReq.getLastZxidSeen()),
            connReq.getTimeOut(),
            cnxn.getRemoteSocketAddress());
    } else {
        validateSession(cnxn, sessionId);
        LOG.debug(
            "Client attempting to renew session: session = 0x{}, zxid = 0x{}, timeout = {}, address = {}",
            Long.toHexString(sessionId),
            Long.toHexString(connReq.getLastZxidSeen()),
            connReq.getTimeOut(),
            cnxn.getRemoteSocketAddress());
        // Close any previous connection bound to this session before rebinding it.
        if (serverCnxnFactory != null) {
            serverCnxnFactory.closeSession(sessionId, ServerCnxn.DisconnectReason.CLIENT_RECONNECT);
        }
        if (secureServerCnxnFactory != null) {
            secureServerCnxnFactory.closeSession(sessionId, ServerCnxn.DisconnectReason.CLIENT_RECONNECT);
        }
        cnxn.setSessionId(sessionId);
        reopenSession(cnxn, sessionId, passwd, sessionTimeout);
        ServerMetrics.getMetrics().CONNECTION_REVALIDATE_COUNT.add(1);
    }
}

/**
 * Validate if a particular session can be reestablished.
 *
 * @param cnxn
 * @param sessionId
 */
protected void validateSession(ServerCnxn cnxn, long sessionId) throws IOException {
    // do nothing; subclasses may override
}

/**
 * Returns true if the caller should throttle given its outstanding request
 * count, i.e. when the global outstanding limit has been exceeded.
 */
public boolean shouldThrottle(long outStandingCount) {
    int globalOutstandingLimit = getGlobalOutstandingLimit();
    if (globalOutstandingLimit < getInflight() || globalOutstandingLimit < getInProcess()) {
        return outStandingCount > 0;
    }
    return false;
}

long getFlushDelay() {
    return flushDelay;
}

static void setFlushDelay(long delay) {
    LOG.info("{} = {} ms", FLUSH_DELAY, delay);
    flushDelay = delay;
}

long getMaxWriteQueuePollTime() {
    return maxWriteQueuePollTime;
}

static void setMaxWriteQueuePollTime(long maxTime) {
    LOG.info("{} = {} ms", MAX_WRITE_QUEUE_POLL_SIZE, maxTime);
    maxWriteQueuePollTime = maxTime;
}

int getMaxBatchSize() {
    return maxBatchSize;
}

static void setMaxBatchSize(int size) {
    LOG.info("{}={}", MAX_BATCH_SIZE, size);
    maxBatchSize = size;
}

/** Reads large-request throttling limits from system properties. */
private void initLargeRequestThrottlingSettings() {
    setLargeRequestMaxBytes(Integer.getInteger("zookeeper.largeRequestMaxBytes", largeRequestMaxBytes));
    setLargeRequestThreshold(Integer.getInteger("zookeeper.largeRequestThreshold", -1));
}

public int getLargeRequestMaxBytes() {
    return largeRequestMaxBytes;
}

public void setLargeRequestMaxBytes(int bytes) {
    if (bytes <= 0) {
        // Reject non-positive limits and keep the previous value.
        LOG.warn("Invalid max bytes for all large requests {}. It should be a positive number.", bytes);
        LOG.warn("Will not change the setting. The max bytes stay at {}", largeRequestMaxBytes);
    } else {
        largeRequestMaxBytes = bytes;
        LOG.info("The max bytes for all large requests are set to {}", largeRequestMaxBytes);
    }
}

public int getLargeRequestThreshold() {
    return largeRequestThreshold;
}

public void setLargeRequestThreshold(int threshold) {
    // Valid values are -1 (disabled) or any positive number.
    if (threshold == 0 || threshold < -1) {
        LOG.warn("Invalid large request threshold {}. It should be -1 or positive. Setting to -1 ", threshold);
        largeRequestThreshold = -1;
    } else {
        largeRequestThreshold = threshold;
        LOG.info("The large request threshold is set to {}", largeRequestThreshold);
    }
}

public int getLargeRequestBytes() {
    return currentLargeRequestBytes.get();
}

/** A request counts as "large" when the threshold is enabled and exceeded. */
private boolean isLargeRequest(int length) {
    // The large request limit is disabled when threshold is -1.
    if (largeRequestThreshold == -1) {
        return false;
    }
    return length > largeRequestThreshold;
}

/**
 * Non-reserving pre-check of the large-request budget: only reads the current
 * total, does not add to it (the reservation happens in
 * checkRequestSizeWhenMessageReceived).
 *
 * @throws IOException if admitting this request would exceed the budget
 */
public boolean checkRequestSizeWhenReceivingMessage(int length) throws IOException {
    if (!isLargeRequest(length)) {
        return true;
    }
    if (currentLargeRequestBytes.get() + length <= largeRequestMaxBytes) {
        return true;
    } else {
        ServerMetrics.getMetrics().LARGE_REQUESTS_REJECTED.add(1);
        throw new IOException("Rejecting large request");
    }
}

/**
 * Atomically reserves the request's bytes against the large-request budget,
 * rolling the reservation back and rejecting if the limit is exceeded.
 */
private boolean checkRequestSizeWhenMessageReceived(int length) throws IOException {
    if (!isLargeRequest(length)) {
        return true;
    }
    int bytes = currentLargeRequestBytes.addAndGet(length);
    if (bytes > largeRequestMaxBytes) {
        // Undo the reservation before rejecting.
        currentLargeRequestBytes.addAndGet(-length);
        ServerMetrics.getMetrics().LARGE_REQUESTS_REJECTED.add(1);
        throw new IOException("Rejecting large request");
    }
    return true;
}

/** Releases any large-request byte reservation held by a finished request. */
public void requestFinished(Request request) {
    int largeRequestLength = request.getLargeRequestSize();
    if (largeRequestLength != -1) {
        currentLargeRequestBytes.addAndGet(-largeRequestLength);
    }
}

/**
 * Dispatches a deserialized client packet: auth packets are handled inline,
 * SASL packets go to processSasl, and everything else (after the auth
 * enforcement check) becomes a Request submitted to the pipeline.
 */
public void processPacket(ServerCnxn cnxn, ByteBuffer incomingBuffer) throws IOException {
    // We have the request, now process and setup for next
    InputStream bais = new ByteBufferInputStream(incomingBuffer);
    BinaryInputArchive bia = BinaryInputArchive.getArchive(bais);
    RequestHeader h = new RequestHeader();
    h.deserialize(bia, "header");

    cnxn.incrOutstandingAndCheckThrottle(h);

    // Need to increase the outstanding request count first, otherwise
    // there might be a race condition that it enabled recv after
    // processing request and then disabled when check throttling.
    incomingBuffer = incomingBuffer.slice();
    if (h.getType() == OpCode.auth) {
        LOG.info("got auth packet {}", cnxn.getRemoteSocketAddress());
        AuthPacket authPacket = new AuthPacket();
        ByteBufferInputStream.byteBuffer2Record(incomingBuffer, authPacket);
        String scheme = authPacket.getScheme();
        ServerAuthenticationProvider ap = ProviderRegistry.getServerProvider(scheme);
        Code authReturn = Code.AUTHFAILED;
        if (ap != null) {
            try {
                // handleAuthentication may close the connection, to allow the client to choose
                // a different server to connect to.
                authReturn = ap.handleAuthentication(
                    new ServerAuthenticationProvider.ServerObjs(this, cnxn),
                    authPacket.getAuth());
            } catch (RuntimeException e) {
                LOG.warn("Caught runtime exception from AuthenticationProvider: {}", scheme, e);
                authReturn = Code.AUTHFAILED;
            }
        }
        if (authReturn == Code.OK) {
            LOG.info("Session 0x{}: auth success for scheme {} and address {}",
                     Long.toHexString(cnxn.getSessionId()),
                     scheme,
                     cnxn.getRemoteSocketAddress());
            ReplyHeader rh = new ReplyHeader(h.getXid(), 0, Code.OK.intValue());
            cnxn.sendResponse(rh, null, null);
        } else {
            if (ap == null) {
                LOG.warn(
                    "No authentication provider for scheme: {} has {}",
                    scheme,
                    ProviderRegistry.listProviders());
            } else {
                LOG.warn("Authentication failed for scheme: {}", scheme);
            }
            // send a response...
            ReplyHeader rh = new ReplyHeader(h.getXid(), 0, Code.AUTHFAILED.intValue());
            cnxn.sendResponse(rh, null, null);
            // ... and close connection
            cnxn.sendBuffer(ServerCnxnFactory.closeConn);
            cnxn.disableRecv();
        }
        return;
    } else if (h.getType() == OpCode.sasl) {
        processSasl(incomingBuffer, cnxn, h);
    } else {
        if (!authHelper.enforceAuthentication(cnxn, h.getXid())) {
            // Authentication enforcement is failed
            // Already sent response to user about failure and closed the session, lets return
            return;
        } else {
            Request si = new Request(cnxn, cnxn.getSessionId(), h.getXid(), h.getType(), incomingBuffer, cnxn.getAuthInfo());
            int length = incomingBuffer.limit();
            if (isLargeRequest(length)) {
                // checkRequestSize will throw IOException if request is rejected
                checkRequestSizeWhenMessageReceived(length);
                si.setLargeRequestSize(length);
            }
            si.setOwner(ServerCnxn.me);
            submitRequest(si);
        }
    }
}

/**
 * Returns true if the given SASL authorization id matches any system property
 * named "zookeeper.superUser" or "zookeeper.superUser.<suffix>".
 */
private static boolean isSaslSuperUser(String id) {
    if (id == null || id.isEmpty()) {
        return false;
    }

    Properties properties = System.getProperties();
    int prefixLen = SASL_SUPER_USER.length();

    for (String k : properties.stringPropertyNames()) {
        if (k.startsWith(SASL_SUPER_USER)
            && (k.length() == prefixLen || k.charAt(prefixLen) == '.')) {
            String value = properties.getProperty(k);

            if (value != null && value.equals(id)) {
                return true;
            }
        }
    }

    return false;
}

private static boolean shouldAllowSaslFailedClientsConnect() {
    return Boolean.getBoolean(ALLOW_SASL_FAILED_CLIENTS);
}

/**
 * Evaluates one client SASL token against this connection's SASL server and
 * replies with the server's response token. On SASL completion the
 * authorization id is added to the connection's auth info (plus "super" for
 * configured super users). On SASL failure the connection is either kept
 * (when failed clients are allowed and SASL is not required) or closed with
 * an appropriate error code.
 */
private void processSasl(ByteBuffer incomingBuffer, ServerCnxn cnxn, RequestHeader requestHeader) throws IOException {
    LOG.debug("Responding to client SASL token.");
    GetSASLRequest clientTokenRecord = new GetSASLRequest();
    ByteBufferInputStream.byteBuffer2Record(incomingBuffer, clientTokenRecord);
    byte[] clientToken = clientTokenRecord.getToken();
    LOG.debug("Size of client SASL token: {}", clientToken.length);
    byte[] responseToken = null;
    try {
        ZooKeeperSaslServer saslServer = cnxn.zooKeeperSaslServer;
        try {
            // note that clientToken might be empty (clientToken.length == 0):
            responseToken = saslServer.evaluateResponse(clientToken);
            if (saslServer.isComplete()) {
                String authorizationID = saslServer.getAuthorizationID();
                LOG.info("Session 0x{}: adding SASL authorization for authorizationID: {}",
                         Long.toHexString(cnxn.getSessionId()), authorizationID);
                cnxn.addAuthInfo(new Id("sasl", authorizationID));
                if (isSaslSuperUser(authorizationID)) {
                    cnxn.addAuthInfo(new Id("super", ""));
                    LOG.info(
                        "Session 0x{}: Authenticated Id '{}' as super user",
                        Long.toHexString(cnxn.getSessionId()),
                        authorizationID);
                }
            }
        } catch (SaslException e) {
            LOG.warn("Client {} failed to SASL authenticate: {}", cnxn.getRemoteSocketAddress(), e);
            if (shouldAllowSaslFailedClientsConnect() && !authHelper.isSaslAuthRequired()) {
                LOG.warn("Maintaining client connection despite SASL authentication failure.");
            } else {
                int error;
                if (authHelper.isSaslAuthRequired()) {
                    LOG.warn(
                        "Closing client connection due to server requires client SASL authenticaiton,"
                        + "but client SASL authentication has failed, or client is not configured with SASL "
                        + "authentication.");
                    error = Code.SESSIONCLOSEDREQUIRESASLAUTH.intValue();
                } else {
                    LOG.warn("Closing client connection due to SASL authentication failure.");
                    error = Code.AUTHFAILED.intValue();
                }

                ReplyHeader replyHeader = new ReplyHeader(requestHeader.getXid(), 0, error);
                cnxn.sendResponse(replyHeader, new SetSASLResponse(null), "response");
                cnxn.sendCloseSession();
                cnxn.disableRecv();
                return;
            }
        }
    } catch (NullPointerException e) {
        LOG.error("cnxn.saslServer is null: cnxn object did not initialize its saslServer properly.");
    }
    if (responseToken != null) {
        LOG.debug("Size of server SASL response: {}", responseToken.length);
    }

    ReplyHeader replyHeader = new ReplyHeader(requestHeader.getXid(), 0, Code.OK.intValue());
    Record record = new SetSASLResponse(responseToken);
    cnxn.sendResponse(replyHeader, record, "response");
}

/** Applies a transaction: session bookkeeping first, then the database. */
public ProcessTxnResult processTxn(TxnHeader hdr, Record txn) {
    processTxnForSessionEvents(null, hdr, txn);
    return processTxnInDB(hdr, txn, null);
}

/**
 * Applies a request's transaction. Write requests update the database and
 * drain matching entries from outstandingChanges (under its lock); quorum
 * requests are additionally recorded as committed proposals.
 */
public ProcessTxnResult processTxn(Request request) {
    TxnHeader hdr = request.getHdr();
    processTxnForSessionEvents(request, hdr, request.getTxn());

    final boolean writeRequest = (hdr != null);
    final boolean quorumRequest = request.isQuorum();

    // return fast w/o synchronization when we get a read
    if (!writeRequest && !quorumRequest) {
        return new ProcessTxnResult();
    }
    synchronized (outstandingChanges) {
        ProcessTxnResult rc = processTxnInDB(hdr, request.getTxn(), request.getTxnDigest());

        // request.hdr is set for write requests, which are the only ones
        // that add to outstandingChanges.
        if (writeRequest) {
            long zxid = hdr.getZxid();
            while (!outstandingChanges.isEmpty()
                   && outstandingChanges.peek().zxid <= zxid) {
                ChangeRecord cr = outstandingChanges.remove();
                ServerMetrics.getMetrics().OUTSTANDING_CHANGES_REMOVED.add(1);
                if (cr.zxid < zxid) {
                    LOG.warn(
                        "Zxid outstanding 0x{} is less than current 0x{}",
                        Long.toHexString(cr.zxid),
                        Long.toHexString(zxid));
                }
                if (outstandingChangesForPath.get(cr.path) == cr) {
                    outstandingChangesForPath.remove(cr.path);
                }
            }
        }

        // do not add non quorum packets to the queue.
        if (quorumRequest) {
            getZKDatabase().addCommittedProposal(request);
        }
        return rc;
    }
}

/**
 * Commits or removes sessions in the session tracker for createSession /
 * closeSession transactions. Works from either a Request or a bare
 * TxnHeader+Record (request may be null).
 */
private void processTxnForSessionEvents(Request request, TxnHeader hdr, Record txn) {
    int opCode = (request == null) ? hdr.getType() : request.type;
    long sessionId = (request == null) ? hdr.getClientId() : request.sessionId;

    if (opCode == OpCode.createSession) {
        if (hdr != null && txn instanceof CreateSessionTxn) {
            CreateSessionTxn cst = (CreateSessionTxn) txn;
            sessionTracker.commitSession(sessionId, cst.getTimeOut());
        } else if (request == null || !request.isLocalSession()) {
            LOG.warn("*****>>>>> Got {} {}", txn.getClass(), txn.toString());
        }
    } else if (opCode == OpCode.closeSession) {
        sessionTracker.removeSession(sessionId);
    }
}

/** Applies the transaction to the ZK database; a null header is a no-op. */
private ProcessTxnResult processTxnInDB(TxnHeader hdr, Record txn, TxnDigest digest) {
    if (hdr == null) {
        return new ProcessTxnResult();
    } else {
        return getZKDatabase().processTxn(hdr, txn, digest);
    }
}

public Map<Long, Set<Long>> getSessionExpiryMap() {
    return sessionTracker.getSessionExpiryMap();
}

/**
 * This method is used to register the ZooKeeperServerShutdownHandler to get
 * server's error or shutdown state change notifications.
 * {@link ZooKeeperServerShutdownHandler#handle(State)} will be called for
 * every server state changes {@link #setState(State)}.
 *
 * @param zkShutdownHandler shutdown handler
 */
void registerServerShutdownHandler(ZooKeeperServerShutdownHandler zkShutdownHandler) {
    this.zkShutdownHandler = zkShutdownHandler;
}

public boolean isResponseCachingEnabled() {
    return isResponseCachingEnabled;
}

public void setResponseCachingEnabled(boolean isEnabled) {
    isResponseCachingEnabled = isEnabled;
}

/** Returns the read response cache, or null when response caching is disabled. */
public ResponseCache getReadResponseCache() {
    return isResponseCachingEnabled ? readResponseCache : null;
}

/** Returns the getChildren response cache, or null when response caching is disabled. */
public ResponseCache getGetChildrenResponseCache() {
    return isResponseCachingEnabled ? getChildrenResponseCache : null;
}

/** Registers this server's gauges and gauge sets with the root metrics context. */
protected void registerMetrics() {
    MetricsContext rootContext = ServerMetrics.getMetrics().getMetricsProvider().getRootContext();

    final ZKDatabase zkdb = this.getZKDatabase();
    final ServerStats stats = this.serverStats();

    rootContext.registerGauge("avg_latency", stats::getAvgLatency);
    rootContext.registerGauge("max_latency", stats::getMaxLatency);
    rootContext.registerGauge("min_latency", stats::getMinLatency);

    rootContext.registerGauge("packets_received", stats::getPacketsReceived);
    rootContext.registerGauge("packets_sent", stats::getPacketsSent);
    rootContext.registerGauge("num_alive_connections", stats::getNumAliveClientConnections);

    rootContext.registerGauge("outstanding_requests", stats::getOutstandingRequests);
    rootContext.registerGauge("uptime", stats::getUptime);

    rootContext.registerGauge("znode_count", zkdb::getNodeCount);

    rootContext.registerGauge("watch_count", zkdb.getDataTree()::getWatchCount);
    rootContext.registerGauge("ephemerals_count", zkdb.getDataTree()::getEphemeralsCount);

    rootContext.registerGauge("approximate_data_size", zkdb.getDataTree()::cachedApproximateDataSize);

    rootContext.registerGauge("global_sessions", zkdb::getSessionCount);
    rootContext.registerGauge("local_sessions", this.getSessionTracker()::getLocalSessionCount);

    OSMXBean osMbean = new OSMXBean();
    rootContext.registerGauge("open_file_descriptor_count", osMbean::getOpenFileDescriptorCount);
    rootContext.registerGauge("max_file_descriptor_count", osMbean::getMaxFileDescriptorCount);

    rootContext.registerGauge("connection_drop_probability", this::getConnectionDropChance);

    rootContext.registerGauge("last_client_response_size", stats.getClientResponseStats()::getLastBufferSize);
    rootContext.registerGauge("max_client_response_size", stats.getClientResponseStats()::getMaxBufferSize);
    rootContext.registerGauge("min_client_response_size", stats.getClientResponseStats()::getMinBufferSize);

    rootContext.registerGauge("outstanding_tls_handshake", this::getOutstandingHandshakeNum);

    rootContext.registerGauge("auth_failed_count", stats::getAuthFailedCount);
    rootContext.registerGauge("non_mtls_remote_conn_count", stats::getNonMTLSRemoteConnCount);
    rootContext.registerGauge("non_mtls_local_conn_count", stats::getNonMTLSLocalConnCount);

    rootContext.registerGaugeSet(QuotaMetricsUtils.QUOTA_COUNT_LIMIT_PER_NAMESPACE,
        () -> QuotaMetricsUtils.getQuotaCountLimit(zkDb.getDataTree()));
    rootContext.registerGaugeSet(QuotaMetricsUtils.QUOTA_BYTES_LIMIT_PER_NAMESPACE,
        () -> QuotaMetricsUtils.getQuotaBytesLimit(zkDb.getDataTree()));
    rootContext.registerGaugeSet(QuotaMetricsUtils.QUOTA_COUNT_USAGE_PER_NAMESPACE,
        () -> QuotaMetricsUtils.getQuotaCountUsage(zkDb.getDataTree()));
    rootContext.registerGaugeSet(QuotaMetricsUtils.QUOTA_BYTES_USAGE_PER_NAMESPACE,
        () -> QuotaMetricsUtils.getQuotaBytesUsage(zkDb.getDataTree()));
}

/**
 * Unregisters the gauges registered by registerMetrics.
 * NOTE(review): "outstanding_tls_handshake" is registered but not unregistered
 * here — confirm whether that asymmetry is intentional.
 */
protected void unregisterMetrics() {

    MetricsContext rootContext = ServerMetrics.getMetrics().getMetricsProvider().getRootContext();

    rootContext.unregisterGauge("avg_latency");
    rootContext.unregisterGauge("max_latency");
    rootContext.unregisterGauge("min_latency");

    rootContext.unregisterGauge("packets_received");
    rootContext.unregisterGauge("packets_sent");
    rootContext.unregisterGauge("num_alive_connections");

    rootContext.unregisterGauge("outstanding_requests");
    rootContext.unregisterGauge("uptime");

    rootContext.unregisterGauge("znode_count");

    rootContext.unregisterGauge("watch_count");
    rootContext.unregisterGauge("ephemerals_count");

    rootContext.unregisterGauge("approximate_data_size");

    rootContext.unregisterGauge("global_sessions");
    rootContext.unregisterGauge("local_sessions");

    rootContext.unregisterGauge("open_file_descriptor_count");
    rootContext.unregisterGauge("max_file_descriptor_count");

    rootContext.unregisterGauge("connection_drop_probability");

    rootContext.unregisterGauge("last_client_response_size");
    rootContext.unregisterGauge("max_client_response_size");
    rootContext.unregisterGauge("min_client_response_size");

    rootContext.unregisterGauge("auth_failed_count");
    rootContext.unregisterGauge("non_mtls_remote_conn_count");
    rootContext.unregisterGauge("non_mtls_local_conn_count");

    rootContext.unregisterGaugeSet(QuotaMetricsUtils.QUOTA_COUNT_LIMIT_PER_NAMESPACE);
    rootContext.unregisterGaugeSet(QuotaMetricsUtils.QUOTA_BYTES_LIMIT_PER_NAMESPACE);
    rootContext.unregisterGaugeSet(QuotaMetricsUtils.QUOTA_COUNT_USAGE_PER_NAMESPACE);
    rootContext.unregisterGaugeSet(QuotaMetricsUtils.QUOTA_BYTES_USAGE_PER_NAMESPACE);
}

/**
 * Hook into admin server, useful to expose additional data
 * that do not represent metrics.
 *
 * @param response a sink which collects the data.
 */
public void dumpMonitorValues(BiConsumer<String, Object> response) {
    ServerStats stats = serverStats();
    response.accept("version", Version.getFullVersion());
    response.accept("server_state", stats.getServerState());
}

/**
 * Grant or deny authorization to an operation on a node as a function of:
 * @param cnxn :    the server connection
 * @param acl :     set of ACLs for the node
 * @param perm :    the permission that the client is requesting
 * @param ids :     the credentials supplied by the client
 * @param path :    the ZNode path
 * @param setAcls : for set ACL operations, the list of ACLs being set. Otherwise null.
 */
public void checkACL(ServerCnxn cnxn, List<ACL> acl, int perm, List<Id> ids, String path, List<ACL> setAcls) throws KeeperException.NoAuthException {
    if (skipACL) {
        return;
    }

    LOG.debug("Permission requested: {} ", perm);
    LOG.debug("ACLs for node: {}", acl);
    LOG.debug("Client credentials: {}", ids);

    if (acl == null || acl.size() == 0) {
        return;
    }
    for (Id authId : ids) {
        // The "super" scheme bypasses all ACL checks.
        if (authId.getScheme().equals("super")) {
            return;
        }
    }
    for (ACL a : acl) {
        Id id = a.getId();
        if ((a.getPerms() & perm) != 0) {
            if (id.getScheme().equals("world") && id.getId().equals("anyone")) {
                return;
            }
            ServerAuthenticationProvider ap = ProviderRegistry.getServerProvider(id.getScheme());
            if (ap != null) {
                for (Id authId : ids) {
                    if (authId.getScheme().equals(id.getScheme())
                        && ap.matches(
                            new ServerAuthenticationProvider.ServerObjs(this, cnxn),
                            new ServerAuthenticationProvider.MatchValues(path, authId.getId(), id.getId(), perm, setAcls))) {
                        return;
                    }
                }
            }
        }
    }
    throw new KeeperException.NoAuthException();
}

/**
 * check a path whether exceeded the quota.
 *
 * @param path
 *            the path of the node, used for the quota prefix check
 * @param lastData
 *            the current node data, {@code null} for none
 * @param data
 *            the data to be set, or {@code null} for none
 * @param type
 *            currently, create and setData need to check quota
 */
public void checkQuota(String path, byte[] lastData, byte[] data, int type) throws KeeperException.QuotaExceededException {
    if (!enforceQuota) {
        return;
    }
    long dataBytes = (data == null) ? 0 : data.length;
    ZKDatabase zkDatabase = getZKDatabase();
    String lastPrefix = zkDatabase.getDataTree().getMaxPrefixWithQuota(path);
    if (StringUtils.isEmpty(lastPrefix)) {
        // no quota node in any ancestor of this path
        return;
    }

    final String namespace = PathUtils.getTopNamespace(path);
    switch (type) {
    case OpCode.create:
        checkQuota(lastPrefix, dataBytes, 1, namespace);
        break;
    case OpCode.setData:
        // setData changes bytes only; the node count delta is 0.
        checkQuota(lastPrefix, dataBytes - (lastData == null ? 0 : lastData.length), 0, namespace);
        break;
    default:
        throw new IllegalArgumentException("Unsupported OpCode for checkQuota: " + type);
    }
}

/**
 * check a path whether exceeded the quota.
 *
 * @param lastPrefix the path of the node which has a quota.
 * @param bytesDiff
 *            the diff to be added to number of bytes
 * @param countDiff
 *            the diff to be added to the count
 * @param namespace
 *           the namespace for collecting quota exceeded errors
 */
private void checkQuota(String lastPrefix, long bytesDiff, long countDiff, String namespace) throws KeeperException.QuotaExceededException {
    LOG.debug("checkQuota: lastPrefix={}, bytesDiff={}, countDiff={}", lastPrefix, bytesDiff, countDiff);

    // now check the quota we set
    String limitNode = Quotas.limitPath(lastPrefix);
    DataNode node = getZKDatabase().getNode(limitNode);
    StatsTrack limitStats;
    if (node == null) {
        // should not happen
        LOG.error("Missing limit node for quota {}", limitNode);
        return;
    }
    synchronized (node) {
        limitStats = new StatsTrack(node.data);
    }
    //check the quota
    boolean checkCountQuota = countDiff != 0 && (limitStats.getCount() > -1 || limitStats.getCountHardLimit() > -1);
    boolean checkByteQuota = bytesDiff != 0 && (limitStats.getBytes() > -1 || limitStats.getByteHardLimit() > -1);

    if (!checkCountQuota && !checkByteQuota) {
        return;
    }

    //check the statPath quota
    String statNode = Quotas.statPath(lastPrefix);
    node = getZKDatabase().getNode(statNode);

    StatsTrack currentStats;
    if (node == null) {
        // should not happen
        LOG.error("Missing node for stat {}", statNode);
        return;
    }
    synchronized (node) {
        currentStats = new StatsTrack(node.data);
    }

    //check the node count quota
    if (checkCountQuota) {
        long newCount = currentStats.getCount() + countDiff;
        boolean isCountHardLimit = limitStats.getCountHardLimit() > -1;
        long countLimit = isCountHardLimit ? limitStats.getCountHardLimit() : limitStats.getCount();

        if (newCount > countLimit) {
            String msg = "Quota exceeded: " + lastPrefix + " [current count=" + newCount + ", " + (isCountHardLimit ? "hard" : "soft") + "CountLimit=" + countLimit + "]";
            RATE_LOGGER.rateLimitLog(msg);
            // Only the hard limit actually rejects the operation.
            if (isCountHardLimit) {
                updateQuotaExceededMetrics(namespace);
                throw new KeeperException.QuotaExceededException(lastPrefix);
            }
        }
    }

    //check the byte quota
    if (checkByteQuota) {
        long newBytes = currentStats.getBytes() + bytesDiff;
        boolean isByteHardLimit = limitStats.getByteHardLimit() > -1;
        long byteLimit = isByteHardLimit ? limitStats.getByteHardLimit() : limitStats.getBytes();
        if (newBytes > byteLimit) {
            String msg = "Quota exceeded: " + lastPrefix + " [current bytes=" + newBytes + ", " + (isByteHardLimit ? "hard" : "soft") + "ByteLimit=" + byteLimit + "]";
            RATE_LOGGER.rateLimitLog(msg);
            // Only the hard limit actually rejects the operation.
            if (isByteHardLimit) {
                updateQuotaExceededMetrics(namespace);
                throw new KeeperException.QuotaExceededException(lastPrefix);
            }
        }
    }
}

public static boolean isDigestEnabled() {
    return digestEnabled;
}

public static void setDigestEnabled(boolean digestEnabled) {
    LOG.info("{} = {}", ZOOKEEPER_DIGEST_ENABLED, digestEnabled);
    ZooKeeperServer.digestEnabled = digestEnabled;
}

/**
 * Trim a path to get the immediate predecessor.
 *
 * @param path
 * @return
 * @throws KeeperException.BadArgumentsException
 */
private String parentPath(String path) throws KeeperException.BadArgumentsException {
    int lastSlash = path.lastIndexOf('/');
    if (lastSlash == -1 || path.indexOf('\0') != -1 || getZKDatabase().isSpecialPath(path)) {
        throw new KeeperException.BadArgumentsException(path);
    }
    return lastSlash == 0 ? "/" : path.substring(0, lastSlash);
}

/**
 * Extracts the path whose ACL governs the given request (the parent for
 * create/delete, the node itself for setData/setACL); for create/setACL it
 * also validates the supplied ACL list via fixupACL. Returns null for request
 * types that need no eager ACL check or that fail to deserialize.
 */
private String effectiveACLPath(Request request) throws KeeperException.BadArgumentsException, KeeperException.InvalidACLException {
    boolean mustCheckACL = false;
    String path = null;
    List<ACL> acl = null;

    switch (request.type) {
    case OpCode.create:
    case OpCode.create2: {
        CreateRequest req = new CreateRequest();
        if (buffer2Record(request.request, req)) {
            mustCheckACL = true;
            acl = req.getAcl();
            path = parentPath(req.getPath());
        }
        break;
    }
    case OpCode.delete: {
        DeleteRequest req = new DeleteRequest();
        if (buffer2Record(request.request, req)) {
            path = parentPath(req.getPath());
        }
        break;
    }
    case OpCode.setData: {
        SetDataRequest req = new SetDataRequest();
        if (buffer2Record(request.request, req)) {
            path = req.getPath();
        }
        break;
    }
    case OpCode.setACL: {
        SetACLRequest req = new SetACLRequest();
        if (buffer2Record(request.request, req)) {
            mustCheckACL = true;
            acl = req.getAcl();
            path = req.getPath();
        }
        break;
    }
    }

    if (mustCheckACL) {
        /* we ignore the extrapolated ACL returned by fixupACL because
         * we only care about it being well-formed (and if it isn't, an
         * exception will be raised).
         */
        PrepRequestProcessor.fixupACL(path, request.authInfo, acl);
    }

    return path;
}

/** Maps a request type to the permission bit required on its effective ACL path. */
private int effectiveACLPerms(Request request) {
    switch (request.type) {
    case OpCode.create:
    case OpCode.create2:
        return ZooDefs.Perms.CREATE;
    case OpCode.delete:
        return ZooDefs.Perms.DELETE;
    case OpCode.setData:
        return ZooDefs.Perms.WRITE;
    case OpCode.setACL:
        return ZooDefs.Perms.ADMIN;
    default:
        return ZooDefs.Perms.ALL;
    }
}

/**
 * Check Write Requests for Potential Access Restrictions
 * <p>
 * Before a request is being proposed to the quorum, lets check it
 * against local ACLs. Non-write requests (read, session, etc.)
 * are passed along. Invalid requests are sent a response.
 * <p>
 * While we are at it, if the request will set an ACL: make sure it's
 * a valid one.
 *
 * @param request
 * @return true if request is permitted, false if not.
 */
public boolean authWriteRequest(Request request) {
    int err;
    String pathToCheck;

    if (!enableEagerACLCheck) {
        return true;
    }

    err = Code.OK.intValue();

    try {
        pathToCheck = effectiveACLPath(request);
        if (pathToCheck != null) {
            checkACL(request.cnxn, zkDb.getACL(pathToCheck, null), effectiveACLPerms(request), request.authInfo, pathToCheck, null);
        }
    } catch (KeeperException.NoAuthException e) {
        LOG.debug("Request failed ACL check", e);
        err = e.code().intValue();
    } catch (KeeperException.InvalidACLException e) {
        LOG.debug("Request has an invalid ACL check", e);
        err = e.code().intValue();
    } catch (KeeperException.NoNodeException e) {
        // Let the downstream processors report the missing node.
        LOG.debug("ACL check against non-existent node: {}", e.getMessage());
    } catch (KeeperException.BadArgumentsException e) {
        // Let the downstream processors report the bad path.
        LOG.debug("ACL check against illegal node path: {}", e.getMessage());
    } catch (Throwable t) {
        LOG.error("Uncaught exception in authWriteRequest with: ", t);
        throw t;
    } finally {
        if (err != Code.OK.intValue()) {
            /* This request has a bad ACL, so we are dismissing it early. */
            decInProcess();
            ReplyHeader rh = new ReplyHeader(request.cxid, 0, err);
            try {
                request.cnxn.sendResponse(rh, null, null);
            } catch (IOException e) {
                LOG.error("IOException : {}", e);
            }
        }
    }

    return err == Code.OK.intValue();
}

/**
 * Deserializes the buffer into the given record, rewinding the buffer
 * afterwards. Returns false (without propagating) on deserialization failure.
 */
private boolean buffer2Record(ByteBuffer request, Record record) {
    boolean rv = false;
    try {
        ByteBufferInputStream.byteBuffer2Record(request, record);
        request.rewind();
        rv = true;
    } catch (IOException ex) {
        // deliberately swallowed: caller treats a false return as "skip check"
    }

    return rv;
}

/** Outstanding TLS handshakes; only the Netty factory tracks this, others report 0. */
public int getOutstandingHandshakeNum() {
    if (serverCnxnFactory instanceof NettyServerCnxnFactory) {
        return ((NettyServerCnxnFactory) serverCnxnFactory).getOutstandingHandshakeNum();
    } else {
        return 0;
    }
}

public boolean isReconfigEnabled() {
    return this.reconfigEnabled;
}

public ZooKeeperServerShutdownHandler getZkShutdownHandler() {
    return zkShutdownHandler;
}

/** Bumps the per-namespace quota-exceeded counter; null namespace is ignored. */
static void updateQuotaExceededMetrics(final String namespace) {
    if (namespace == null) {
        return;
    }
    ServerMetrics.getMetrics().QUOTA_EXCEEDED_ERROR_PER_NAMESPACE.add(namespace, 1);
}

}
class ZooKeeperServer implements SessionExpirer, ServerStats.Provider {

    protected static final Logger LOG;
    private static final RateLogger RATE_LOGGER;

    // System property names used to configure the server; resolved once in the
    // static initializer below.
    public static final String GLOBAL_OUTSTANDING_LIMIT = "zookeeper.globalOutstandingLimit";
    public static final String ENABLE_EAGER_ACL_CHECK = "zookeeper.enableEagerACLCheck";
    public static final String SKIP_ACL = "zookeeper.skipACL";
    public static final String ENFORCE_QUOTA = "zookeeper.enforceQuota";

    static final boolean enableEagerACLCheck;
    static final boolean skipACL;
    public static final boolean enforceQuota;

    public static final String SASL_SUPER_USER = "zookeeper.superUser";
    public static final String ALLOW_SASL_FAILED_CLIENTS = "zookeeper.allowSaslFailedClients";
    public static final String ZOOKEEPER_DIGEST_ENABLED = "zookeeper.digest.enabled";
    private static boolean digestEnabled;
    public static final String CLOSE_SESSION_TXN_ENABLED = "zookeeper.closeSessionTxn.enabled";
    private static boolean closeSessionTxnEnabled = true;

    static {
        LOG = LoggerFactory.getLogger(ZooKeeperServer.class);
        RATE_LOGGER = new RateLogger(LOG);
        ZookeeperBanner.printBanner(LOG);
        Environment.logEnv("Server environment:", LOG);

        enableEagerACLCheck = Boolean.getBoolean(ENABLE_EAGER_ACL_CHECK);
        LOG.info("{} = {}", ENABLE_EAGER_ACL_CHECK, enableEagerACLCheck);

        // skipACL disables ALL ACL enforcement; it must be exactly "yes".
        skipACL = System.getProperty(SKIP_ACL, "no").equals("yes");
        if (skipACL) {
            LOG.info("{}==\"yes\", ACL checks will be skipped", SKIP_ACL);
        }

        enforceQuota = Boolean.parseBoolean(System.getProperty(ENFORCE_QUOTA, "false"));
        if (enforceQuota) {
            LOG.info("{} = {}, Quota Enforce enables", ENFORCE_QUOTA, enforceQuota);
        }

        digestEnabled = Boolean.parseBoolean(System.getProperty(ZOOKEEPER_DIGEST_ENABLED, "true"));
        LOG.info("{} = {}", ZOOKEEPER_DIGEST_ENABLED, digestEnabled);

        closeSessionTxnEnabled = Boolean.parseBoolean(
            System.getProperty(CLOSE_SESSION_TXN_ENABLED, "true"));
        LOG.info("{} = {}", CLOSE_SESSION_TXN_ENABLED, closeSessionTxnEnabled);
    }

    public static boolean isCloseSessionTxnEnabled() {
        return closeSessionTxnEnabled;
    }

    public static void setCloseSessionTxnEnabled(boolean enabled) {
        ZooKeeperServer.closeSessionTxnEnabled = enabled;
        LOG.info("Update {} to {}", CLOSE_SESSION_TXN_ENABLED, ZooKeeperServer.closeSessionTxnEnabled);
    }

    protected ZooKeeperServerBean jmxServerBean;
    protected DataTreeBean jmxDataTreeBean;

    public static final int DEFAULT_TICK_TIME = 3000;
    protected int tickTime = DEFAULT_TICK_TIME;
    public static final int DEFAULT_THROTTLED_OP_WAIT_TIME = 0;
    protected static volatile int throttledOpWaitTime = Integer.getInteger("zookeeper.throttled_op_wait_time", DEFAULT_THROTTLED_OP_WAIT_TIME);
    /** value of -1 indicates unset, use default (tickTime * 2, see setMinSessionTimeout) */
    protected int minSessionTimeout = -1;
    /** value of -1 indicates unset, use default (tickTime * 20, see setMaxSessionTimeout) */
    protected int maxSessionTimeout = -1;
    /** Socket listen backlog. Value of -1 indicates unset */
    protected int listenBacklog = -1;
    protected SessionTracker sessionTracker;
    private FileTxnSnapLog txnLogFactory = null;
    private ZKDatabase zkDb;
    private ResponseCache readResponseCache;
    private ResponseCache getChildrenResponseCache;
    // highest zxid handed out; see getNextZxid()/setZxid()
    private final AtomicLong hzxid = new AtomicLong(0);
    public static final Exception ok = new Exception("No prob");
    protected RequestProcessor firstProcessor;
    protected JvmPauseMonitor jvmPauseMonitor;
    protected volatile State state = State.INITIAL;
    private boolean isResponseCachingEnabled = true;
    /* contains the configuration file content read at startup */
    protected String initialConfig;
    protected boolean reconfigEnabled;
    private final RequestPathMetricsCollector requestPathMetricsCollector;
    private static final int DEFAULT_SNAP_COUNT = 100000;
    private static final int DEFAULT_GLOBAL_OUTSTANDING_LIMIT = 1000;
    private boolean localSessionEnabled = false;

    // Server lifecycle; see setState() for the transition rules.
    protected enum State {
        INITIAL, RUNNING, SHUTDOWN, ERROR
    }

    /**
     * This is the secret that we use to generate passwords.
     * For the moment, it's more of a checksum that's used in reconnection,
     * which carries no security weight, and is treated internally as if it
     * carries no security weight.
     */
    private static final long superSecret = 0XB3415C00L;

    // Count of requests currently inside the processor pipeline.
    private final AtomicInteger requestsInProcess = new AtomicInteger(0);
    final Deque<ChangeRecord> outstandingChanges = new ArrayDeque<>();
    // maps from a path to the latest outstanding change record for that path
    final Map<String, ChangeRecord> outstandingChangesForPath = new HashMap<String, ChangeRecord>();
    protected ServerCnxnFactory serverCnxnFactory;
    protected ServerCnxnFactory secureServerCnxnFactory;
    private final ServerStats serverStats;
    private final ZooKeeperServerListener listener;
    private ZooKeeperServerShutdownHandler zkShutdownHandler;
    private volatile int createSessionTrackerServerId = 1;

    private static final String FLUSH_DELAY = "zookeeper.flushDelay";
    private static volatile long flushDelay;
    private static final String MAX_WRITE_QUEUE_POLL_SIZE = "zookeeper.maxWriteQueuePollTime";
    private static volatile long maxWriteQueuePollTime;
    private static final String MAX_BATCH_SIZE = "zookeeper.maxBatchSize";
    private static volatile int maxBatchSize;

    /**
     * Starting size of read and write ByteArrayOutputStream buffers.
     * Default is 1024 bytes; values below 32 are rejected at startup.
     * Flag not used for small transfers like connectResponses.
     */
    public static final String INT_BUFFER_STARTING_SIZE_BYTES = "zookeeper.intBufferStartingSizeBytes";
    public static final int DEFAULT_STARTING_BUFFER_SIZE = 1024;
    public static final int intBufferStartingSizeBytes;

    public static final String GET_DATA_RESPONSE_CACHE_SIZE = "zookeeper.maxResponseCacheSize";
    public static final String GET_CHILDREN_RESPONSE_CACHE_SIZE = "zookeeper.maxGetChildrenResponseCacheSize";

    static {
        long configuredFlushDelay = Long.getLong(FLUSH_DELAY, 0);
        setFlushDelay(configuredFlushDelay);
        // poll time defaults to one third of the configured flush delay
        setMaxWriteQueuePollTime(Long.getLong(MAX_WRITE_QUEUE_POLL_SIZE, configuredFlushDelay / 3));
        setMaxBatchSize(Integer.getInteger(MAX_BATCH_SIZE, 1000));

        intBufferStartingSizeBytes = Integer.getInteger(INT_BUFFER_STARTING_SIZE_BYTES, DEFAULT_STARTING_BUFFER_SIZE);
        if (intBufferStartingSizeBytes < 32) {
            String msg = "Buffer starting size (" + intBufferStartingSizeBytes
                         + ") must be greater than or equal to 32. "
                         + "Configure with \"-Dzookeeper.intBufferStartingSizeBytes=<size>\" ";
            LOG.error(msg);
            throw new IllegalArgumentException(msg);
        }
        LOG.info("{} = {}", INT_BUFFER_STARTING_SIZE_BYTES, intBufferStartingSizeBytes);
    }

    // Connection-level throttling (see processConnectRequest).
    private BlueThrottle connThrottle = new BlueThrottle();
    private RequestThrottler requestThrottler;
    public static final String SNAP_COUNT = "zookeeper.snapCount";

    /**
     * This setting sets a limit on the total number of large requests that
     * can be inflight and is designed to prevent ZooKeeper from accepting
     * too many large requests such that the JVM runs out of usable heap and
     * ultimately crashes.
     *
     * The limit is enforced by the {@link checkRequestSize(int, boolean)}
     * method which is called by the connection layer ({@link NIOServerCnxn},
     * {@link NettyServerCnxn}) before allocating a byte buffer and pulling
     * data off the TCP socket. The limit is then checked again by the
     * ZooKeeper server in {@link processPacket(ServerCnxn, ByteBuffer)} which
     * also atomically updates {@link currentLargeRequestBytes}.
     * The request is then marked as a large request, with the request size
     * stored in the Request object so that it can later be decremented from
     * {@link currentLargeRequestsBytes}.
     *
     * When a request is completed or dropped, the relevant code path calls the
     * {@link requestFinished(Request)} method which performs the decrement if
     * needed.
     */
    private volatile int largeRequestMaxBytes = 100 * 1024 * 1024;

    /**
     * The size threshold after which a request is considered a large request
     * and is checked against the large request byte limit.
     */
    private volatile int largeRequestThreshold = -1;

    private final AtomicInteger currentLargeRequestBytes = new AtomicInteger(0);

    private AuthenticationHelper authHelper;

    void removeCnxn(ServerCnxn cnxn) {
        zkDb.removeCnxn(cnxn);
    }

    /**
     * Creates a ZooKeeperServer instance. Nothing is setup, use the setX
     * methods to prepare the instance (eg datadir, datalogdir, ticktime,
     * builder, etc...)
     */
    public ZooKeeperServer() {
        listener = new ZooKeeperServerListenerImpl(this);
        serverStats = new ServerStats(this);
        this.requestPathMetricsCollector = new RequestPathMetricsCollector();
        this.authHelper = new AuthenticationHelper();
    }

    /**
     * Keeping this constructor for backward compatibility
     */
    public ZooKeeperServer(FileTxnSnapLog txnLogFactory, int tickTime, int minSessionTimeout, int maxSessionTimeout, int clientPortListenBacklog, ZKDatabase zkDb, String initialConfig) {
        this(txnLogFactory, tickTime, minSessionTimeout, maxSessionTimeout, clientPortListenBacklog, zkDb, initialConfig, QuorumPeerConfig.isReconfigEnabled());
    }

    /**
     * Returns the configuration file content read at startup.
     * NOTE(review): the javadoc previously attached here described a
     * ZooKeeperServer constructor ("sets everything up, but doesn't actually
     * start listening for clients until run() is invoked") and appears to have
     * been attached to the wrong member.
     */
    public String getInitialConfig() {
        return initialConfig;
    }

    /**
     * Adds JvmPauseMonitor and delegates to the full constructor.
     */
    public ZooKeeperServer(JvmPauseMonitor jvmPauseMonitor, FileTxnSnapLog txnLogFactory, int tickTime, int minSessionTimeout, int maxSessionTimeout, int clientPortListenBacklog, ZKDatabase zkDb, String initialConfig) {
        this(txnLogFactory, tickTime, minSessionTimeout, maxSessionTimeout, clientPortListenBacklog, zkDb, initialConfig, QuorumPeerConfig.isReconfigEnabled());
        this.jvmPauseMonitor = jvmPauseMonitor;
        if (jvmPauseMonitor != null) {
            LOG.info("Added JvmPauseMonitor to server");
        }
    }

    /**
     * creates a zookeeperserver instance.
     * @param txnLogFactory the file transaction snapshot logging class
     * @param tickTime the ticktime for the server
     */
    public ZooKeeperServer(FileTxnSnapLog txnLogFactory, int tickTime, String initialConfig) {
        this(txnLogFactory, tickTime, -1, -1, -1, new ZKDatabase(txnLogFactory), initialConfig, QuorumPeerConfig.isReconfigEnabled());
    }

    public ServerStats serverStats() {
        return serverStats;
    }

    public RequestPathMetricsCollector getRequestPathMetricsCollector() {
        return requestPathMetricsCollector;
    }

    public BlueThrottle connThrottle() {
        return connThrottle;
    }

    /** Writes the effective server configuration as key=value lines. */
    public void dumpConf(PrintWriter pwriter) {
        pwriter.print("clientPort=");
        pwriter.println(getClientPort());
        pwriter.print("secureClientPort=");
        pwriter.println(getSecureClientPort());
        pwriter.print("dataDir=");
        pwriter.println(zkDb.snapLog.getSnapDir().getAbsolutePath());
        pwriter.print("dataDirSize=");
        pwriter.println(getDataDirSize());
        pwriter.print("dataLogDir=");
        pwriter.println(zkDb.snapLog.getDataDir().getAbsolutePath());
        pwriter.print("dataLogSize=");
        pwriter.println(getLogDirSize());
        pwriter.print("tickTime=");
        pwriter.println(getTickTime());
        pwriter.print("maxClientCnxns=");
        pwriter.println(getMaxClientCnxnsPerHost());
        pwriter.print("minSessionTimeout=");
        pwriter.println(getMinSessionTimeout());
        pwriter.print("maxSessionTimeout=");
        pwriter.println(getMaxSessionTimeout());
        pwriter.print("clientPortListenBacklog=");
        pwriter.println(getClientPortListenBacklog());
        pwriter.print("serverId=");
        pwriter.println(getServerId());
    }

    public ZooKeeperServerConf getConf() {
        return new ZooKeeperServerConf(
            getClientPort(),
            zkDb.snapLog.getSnapDir().getAbsolutePath(),
            zkDb.snapLog.getDataDir().getAbsolutePath(),
            getTickTime(),
            getMaxClientCnxnsPerHost(),
            getMinSessionTimeout(),
            getMaxSessionTimeout(),
            getServerId(),
            getClientPortListenBacklog());
    }

    /**
     * This constructor is for backward compatibility with the existing unit
     * test code.
     * It defaults to FileLogProvider persistence provider.
     */
    public ZooKeeperServer(File snapDir, File logDir, int tickTime) throws IOException {
        this(new FileTxnSnapLog(snapDir, logDir), tickTime, "");
    }

    /**
     * Default constructor, relies on the config for its argument values
     *
     * @throws IOException
     */
    public ZooKeeperServer(FileTxnSnapLog txnLogFactory) throws IOException {
        this(txnLogFactory, DEFAULT_TICK_TIME, -1, -1, -1, new ZKDatabase(txnLogFactory), "", QuorumPeerConfig.isReconfigEnabled());
    }

    /**
     * get the zookeeper database for this server
     * @return the zookeeper database for this server
     */
    public ZKDatabase getZKDatabase() {
        return this.zkDb;
    }

    /**
     * set the zkdatabase for this zookeeper server
     * @param zkDb the zookeeper database to use
     */
    public void setZKDatabase(ZKDatabase zkDb) {
        this.zkDb = zkDb;
    }

    /**
     * Restore sessions and data
     */
    public void loadData() throws IOException, InterruptedException {
        /*
         * When a new leader starts executing Leader, it invokes this method.
         * The database, however, has been initialized before running leader
         * election so that the server could pick its zxid for its initial
         * vote. Consequently, we don't need to initialize it once more and
         * can avoid the penalty of loading it a second time. Not reloading it
         * is particularly important for applications that host a large
         * database.
         *
         * The following if block checks whether the database has been
         * initialized or not.
         *
         * See ZOOKEEPER-1642 for more detail.
         */
        if (zkDb.isInitialized()) {
            setZxid(zkDb.getDataTreeLastProcessedZxid());
        } else {
            setZxid(zkDb.loadDataBase());
        }

        // Kill sessions that have no recorded timeout (dead sessions).
        zkDb.getSessions().stream()
            .filter(session -> zkDb.getSessionWithTimeOuts().get(session) == null)
            .forEach(session -> killSession(session, zkDb.getDataTreeLastProcessedZxid()));

        takeSnapshot();
    }

    public void takeSnapshot() {
        takeSnapshot(false);
    }

    /**
     * Serializes the current data tree and session table to disk.
     *
     * @param syncSnap passed through to the snap log save operation
     */
    public void takeSnapshot(boolean syncSnap) {
        long start = Time.currentElapsedTime();
        try {
            txnLogFactory.save(zkDb.getDataTree(), zkDb.getSessionWithTimeOuts(), syncSnap);
        } catch (IOException e) {
            // A failed snapshot leaves the server unrecoverable; exit the process.
            LOG.error("Severe unrecoverable error, exiting", e);
            ServiceUtils.requestSystemExit(ExitCode.TXNLOG_ERROR_TAKING_SNAPSHOT.getValue());
        }
        long elapsed = Time.currentElapsedTime() - start;
        LOG.info("Snapshot taken in {} ms", elapsed);
        ServerMetrics.getMetrics().SNAPSHOT_TIME.add(elapsed);
    }

    public boolean shouldForceWriteInitialSnapshotAfterLeaderElection() {
        return txnLogFactory.shouldForceWriteInitialSnapshotAfterLeaderElection();
    }

    // NOTE(review): getDataDirSize() measures getDataDir() while dumpConf
    // prints "dataDir=" from getSnapDir() (and vice versa for the log dir) —
    // the pairing looks swapped; confirm against FileTxnSnapLog naming.
    @Override
    public long getDataDirSize() {
        if (zkDb == null) {
            return 0L;
        }
        File path = zkDb.snapLog.getDataDir();
        return getDirSize(path);
    }

    @Override
    public long getLogDirSize() {
        if (zkDb == null) {
            return 0L;
        }
        File path = zkDb.snapLog.getSnapDir();
        return getDirSize(path);
    }

    /** Recursively sums the sizes of all regular files under the given path. */
    private long getDirSize(File file) {
        long size = 0L;
        if (file.isDirectory()) {
            File[] files = file.listFiles();
            if (files != null) {
                for (File f : files) {
                    size += getDirSize(f);
                }
            }
        } else {
            size = file.length();
        }
        return size;
    }

    public long getZxid() {
        return hzxid.get();
    }

    public SessionTracker getSessionTracker() {
        return sessionTracker;
    }

    long getNextZxid() {
        return hzxid.incrementAndGet();
    }

    public void setZxid(long zxid) {
        hzxid.set(zxid);
    }

    /** Submits a closeSession request for the given session id. */
    private void close(long sessionId) {
        Request si = new Request(null, sessionId, 0, OpCode.closeSession, null, null);
        submitRequest(si);
    }
    public void closeSession(long sessionId) {
        LOG.info("Closing session 0x{}", Long.toHexString(sessionId));
        // we do not want to wait for a session close. send it as soon as we
        // detect it!
        close(sessionId);
    }

    /** Removes the session from the database and (if present) the tracker. */
    protected void killSession(long sessionId, long zxid) {
        zkDb.killSession(sessionId, zxid);
        if (LOG.isTraceEnabled()) {
            ZooTrace.logTraceMessage(
                LOG,
                ZooTrace.SESSION_TRACE_MASK,
                "ZooKeeperServer --- killSession: 0x" + Long.toHexString(sessionId));
        }
        if (sessionTracker != null) {
            sessionTracker.removeSession(sessionId);
        }
    }

    public void expire(Session session) {
        long sessionId = session.getSessionId();
        LOG.info(
            "Expiring session 0x{}, timeout of {}ms exceeded",
            Long.toHexString(sessionId),
            session.getTimeout());
        close(sessionId);
    }

    public void expire(long sessionId) {
        LOG.info("forcibly expiring session 0x{}", Long.toHexString(sessionId));
        close(sessionId);
    }

    /** Thrown when a request references a session that no longer exists. */
    public static class MissingSessionException extends IOException {

        private static final long serialVersionUID = 7467414635467261007L;

        public MissingSessionException(String msg) {
            super(msg);
        }

    }

    /** Refreshes the connection's session liveness; throws if it has expired. */
    void touch(ServerCnxn cnxn) throws MissingSessionException {
        if (cnxn == null) {
            return;
        }
        long id = cnxn.getSessionId();
        int to = cnxn.getSessionTimeout();
        if (!sessionTracker.touchSession(id, to)) {
            throw new MissingSessionException("No session with sessionid 0x" + Long.toHexString(id) + " exists, probably expired and removed");
        }
    }

    protected void registerJMX() {
        // register with JMX; failures are non-fatal and only logged
        try {
            jmxServerBean = new ZooKeeperServerBean(this);
            MBeanRegistry.getInstance().register(jmxServerBean, null);

            try {
                jmxDataTreeBean = new DataTreeBean(zkDb.getDataTree());
                MBeanRegistry.getInstance().register(jmxDataTreeBean, jmxServerBean);
            } catch (Exception e) {
                LOG.warn("Failed to register with JMX", e);
                jmxDataTreeBean = null;
            }
        } catch (Exception e) {
            LOG.warn("Failed to register with JMX", e);
            jmxServerBean = null;
        }
    }

    /** Lazily creates the database and loads it if not yet initialized. */
    public void startdata() throws IOException, InterruptedException {
        if (zkDb == null) {
            zkDb = new ZKDatabase(this.txnLogFactory);
        }
        if (!zkDb.isInitialized()) {
            loadData();
        }
    }

    public synchronized void startup() {
        startupWithServerState(State.RUNNING);
    }

    public synchronized void startupWithoutServing() {
        startupWithServerState(State.INITIAL);
    }

    public synchronized void startServing() {
        setState(State.RUNNING);
        notifyAll();
    }

    private void startupWithServerState(State state) {
        if (sessionTracker == null) {
            createSessionTracker();
        }
        startSessionTracker();
        setupRequestProcessors();
        startRequestThrottler();
        registerJMX();
        startJvmPauseMonitor();
        registerMetrics();

        setState(state);

        requestPathMetricsCollector.start();

        localSessionEnabled = sessionTracker.isLocalSessionsEnabled();

        // wake any thread blocked waiting for startup (see enqueueRequest)
        notifyAll();
    }

    protected void startJvmPauseMonitor() {
        if (this.jvmPauseMonitor != null) {
            this.jvmPauseMonitor.serviceStart();
        }
    }

    protected void startRequestThrottler() {
        requestThrottler = new RequestThrottler(this);
        requestThrottler.start();
    }

    /** Builds the standalone pipeline: Prep -> Sync -> Final. */
    protected void setupRequestProcessors() {
        RequestProcessor finalProcessor = new FinalRequestProcessor(this);
        RequestProcessor syncProcessor = new SyncRequestProcessor(this, finalProcessor);
        ((SyncRequestProcessor) syncProcessor).start();
        firstProcessor = new PrepRequestProcessor(this, syncProcessor);
        ((PrepRequestProcessor) firstProcessor).start();
    }

    public ZooKeeperServerListener getZooKeeperServerListener() {
        return listener;
    }

    /**
     * Change the server ID used by the session tracker created in
     * createSessionTracker().
     *
     * @param newId ID to use
     */
    public void setCreateSessionTrackerServerId(int newId) {
        createSessionTrackerServerId = newId;
    }

    protected void createSessionTracker() {
        sessionTracker = new SessionTrackerImpl(this, zkDb.getSessionWithTimeOuts(), tickTime, createSessionTrackerServerId, getZooKeeperServerListener());
    }

    protected void startSessionTracker() {
        ((SessionTrackerImpl) sessionTracker).start();
    }

    /**
     * Sets the state of ZooKeeper server. After changing the state, it notifies
     * the server state change to a registered shutdown handler, if any.
     * <p>
     * The following are the server state transitions:
     * <ul><li>During startup the server will be in the INITIAL state.</li>
     * <li>After successfully starting, the server sets the state to RUNNING.
     * </li>
     * <li>The server transitions to the ERROR state if it hits an internal
     * error. {@link ZooKeeperServerListenerImpl} notifies any critical resource
     * error events, e.g., SyncRequestProcessor not being able to write a txn to
     * disk.</li>
     * <li>During shutdown the server sets the state to SHUTDOWN, which
     * corresponds to the server not running.</li></ul>
     *
     * @param state new server state.
     */
    protected void setState(State state) {
        this.state = state;
        if (zkShutdownHandler != null) {
            zkShutdownHandler.handle(state);
        } else {
            LOG.debug(
                "ZKShutdownHandler is not registered, so ZooKeeper server"
                    + " won't take any action on ERROR or SHUTDOWN server state changes");
        }
    }

    /**
     * This can be used while shutting down the server to see whether the server
     * is already shutdown or not.
     *
     * @return true if the server is running or server hits an error, false
     * otherwise.
     */
    protected boolean canShutdown() {
        return state == State.RUNNING || state == State.ERROR;
    }

    /**
     * @return true if the server is running, false otherwise.
     */
    public boolean isRunning() {
        return state == State.RUNNING;
    }

    public void shutdown() {
        shutdown(false);
    }

    /**
     * Shut down the server instance
     * @param fullyShutDown true if another server using the same database will not replace this one in the same process
     */
    public synchronized void shutdown(boolean fullyShutDown) {
        if (!canShutdown()) {
            // nothing to stop, but still honor the request to drop the database
            if (fullyShutDown && zkDb != null) {
                zkDb.clear();
            }
            LOG.debug("ZooKeeper server is not running, so not proceeding to shutdown!");
            return;
        }
        LOG.info("shutting down");

        setState(State.SHUTDOWN);

        unregisterMetrics();

        if (requestThrottler != null) {
            requestThrottler.shutdown();
        }
        if (sessionTracker != null) {
            sessionTracker.shutdown();
        }
        if (firstProcessor != null) {
            firstProcessor.shutdown();
        }
        if (jvmPauseMonitor != null) {
            jvmPauseMonitor.serviceStop();
        }

        if (zkDb != null) {
            if (fullyShutDown) {
                zkDb.clear();
            } else {
                // keep the database but bring it up to date with the txn log;
                // fall back to clearing it if that fails
                try {
                    zkDb.fastForwardDataBase();
                } catch (IOException e) {
                    LOG.error("Error updating DB", e);
                    zkDb.clear();
                }
            }
        }

        requestPathMetricsCollector.shutdown();
        unregisterJMX();
    }

    protected void unregisterJMX() {
        // unregister from JMX; failures are non-fatal and only logged
        try {
            if (jmxDataTreeBean != null) {
                MBeanRegistry.getInstance().unregister(jmxDataTreeBean);
            }
        } catch (Exception e) {
            LOG.warn("Failed to unregister with JMX", e);
        }
        try {
            if (jmxServerBean != null) {
                MBeanRegistry.getInstance().unregister(jmxServerBean);
            }
        } catch (Exception e) {
            LOG.warn("Failed to unregister with JMX", e);
        }
        jmxServerBean = null;
        jmxDataTreeBean = null;
    }

    public void incInProcess() {
        requestsInProcess.incrementAndGet();
    }

    public void decInProcess() {
        requestsInProcess.decrementAndGet();
        if (requestThrottler != null) {
            // a slot freed up; let the throttler admit another request
            requestThrottler.throttleWake();
        }
    }

    public int getInProcess() {
        return requestsInProcess.get();
    }

    public int getInflight() {
        return requestThrottleInflight();
    }

    private int requestThrottleInflight() {
        if (requestThrottler != null) {
            return requestThrottler.getInflight();
        }
        return 0;
    }

    /** Holds precomputed node and tree digest values. */
    static class PrecalculatedDigest {

        final long nodeDigest;
        final long treeDigest;
        PrecalculatedDigest(long nodeDigest, long treeDigest) {
            this.nodeDigest = nodeDigest;
            this.treeDigest = treeDigest;
        }
    }

    /**
     * This structure is used to facilitate information sharing between PrepRP
     * and FinalRP.
     */
    static class ChangeRecord {

        PrecalculatedDigest precalculatedDigest;
        byte[] data;

        ChangeRecord(long zxid, String path, StatPersisted stat, int childCount, List<ACL> acl) {
            this.zxid = zxid;
            this.path = path;
            this.stat = stat;
            this.childCount = childCount;
            this.acl = acl;
        }

        long zxid;

        String path;

        StatPersisted stat; /* Make sure to create a new object when changing */

        int childCount;

        List<ACL> acl; /* Make sure to create a new object when changing */

        /** Returns a copy of this record with the given zxid; stat and acl are deep-copied. */
        ChangeRecord duplicate(long zxid) {
            StatPersisted stat = new StatPersisted();
            if (this.stat != null) {
                DataTree.copyStatPersisted(this.stat, stat);
            }
            ChangeRecord changeRecord = new ChangeRecord(zxid, path, stat, childCount, acl == null ? new ArrayList<>() : new ArrayList<>(acl));
            changeRecord.precalculatedDigest = precalculatedDigest;
            changeRecord.data = data;
            return changeRecord;
        }
    }

    /** Derives a deterministic 16-byte session password from the session id. */
    byte[] generatePasswd(long id) {
        Random r = new Random(id ^ superSecret);
        byte[] p = new byte[16];
        r.nextBytes(p);
        return p;
    }

    protected boolean checkPasswd(long sessionId, byte[] passwd) {
        return sessionId != 0 && Arrays.equals(passwd, generatePasswd(sessionId));
    }

    /**
     * Creates a new session, fills {@code passwd} with the generated password
     * and submits the createSession request.
     *
     * @return the new session id
     */
    long createSession(ServerCnxn cnxn, byte[] passwd, int timeout) {
        if (passwd == null) {
            // tolerate a null password from the client
            passwd = new byte[0];
        }
        long sessionId = sessionTracker.createSession(timeout);
        Random r = new Random(sessionId ^ superSecret);
        r.nextBytes(passwd);
        ByteBuffer to = ByteBuffer.allocate(4);
        to.putInt(timeout);
        cnxn.setSessionId(sessionId);
        Request si = new Request(cnxn, sessionId, 0, OpCode.createSession, to, null);
        submitRequest(si);
        return sessionId;
    }

    /**
     * set the owner of this session as owner
     * @param id the session id
     * @param owner the owner of the session
     * @throws SessionExpiredException
     */
    public void setOwner(long id, Object owner) throws SessionExpiredException {
        sessionTracker.setOwner(id, owner);
    }

    /** Touches the session and finishes session init with the validity result. */
    protected void revalidateSession(ServerCnxn cnxn, long sessionId, int sessionTimeout) throws IOException {
        boolean rc = sessionTracker.touchSession(sessionId, sessionTimeout);
        if (LOG.isTraceEnabled()) {
            ZooTrace.logTraceMessage(
                LOG,
                ZooTrace.SESSION_TRACE_MASK,
                "Session 0x" + Long.toHexString(sessionId) + " is valid: " + rc);
        }
        finishSessionInit(cnxn, rc);
    }

    public void reopenSession(ServerCnxn cnxn, long sessionId, byte[] passwd, int sessionTimeout) throws IOException {
        if (checkPasswd(sessionId, passwd)) {
            revalidateSession(cnxn, sessionId, sessionTimeout);
        } else {
            LOG.warn(
                "Incorrect password from {} for session 0x{}",
                cnxn.getRemoteSocketAddress(),
                Long.toHexString(sessionId));
            finishSessionInit(cnxn, false);
        }
    }

    /**
     * Completes session establishment: registers the connection and sends
     * the ConnectResponse (or a close on an invalid session).
     */
    public void finishSessionInit(ServerCnxn cnxn, boolean valid) {
        // register with JMX
        try {
            if (valid) {
                if (serverCnxnFactory != null && serverCnxnFactory.cnxns.contains(cnxn)) {
                    serverCnxnFactory.registerConnection(cnxn);
                } else if (secureServerCnxnFactory != null && secureServerCnxnFactory.cnxns.contains(cnxn)) {
                    secureServerCnxnFactory.registerConnection(cnxn);
                }
            }
        } catch (Exception e) {
            LOG.warn("Failed to register with JMX", e);
        }

        try {
            ConnectResponse rsp = new ConnectResponse(
                0,
                valid ? cnxn.getSessionTimeout() : 0,
                valid ? cnxn.getSessionId() : 0,
                valid ? generatePasswd(cnxn.getSessionId()) : new byte[16]);
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            BinaryOutputArchive bos = BinaryOutputArchive.getArchive(baos);
            // length placeholder; the real length is patched in below
            bos.writeInt(-1, "len");
            rsp.serialize(bos, "connect");
            if (!cnxn.isOldClient) {
                bos.writeBool(this instanceof ReadOnlyZooKeeperServer, "readOnly");
            }
            baos.close();
            ByteBuffer bb = ByteBuffer.wrap(baos.toByteArray());
            bb.putInt(bb.remaining() - 4).rewind();
            cnxn.sendBuffer(bb);

            if (valid) {
                LOG.debug(
                    "Established session 0x{} with negotiated timeout {} for client {}",
                    Long.toHexString(cnxn.getSessionId()),
                    cnxn.getSessionTimeout(),
                    cnxn.getRemoteSocketAddress());
                cnxn.enableRecv();
            } else {
                LOG.info(
                    "Invalid session 0x{} for client {}, probably expired",
                    Long.toHexString(cnxn.getSessionId()),
                    cnxn.getRemoteSocketAddress());
                cnxn.sendBuffer(ServerCnxnFactory.closeConn);
            }
        } catch (Exception e) {
            LOG.warn("Exception while establishing session, closing", e);
            cnxn.close(ServerCnxn.DisconnectReason.IO_EXCEPTION_IN_SESSION_INIT);
        }
    }

    public void closeSession(ServerCnxn cnxn, RequestHeader requestHeader) {
        closeSession(cnxn.getSessionId());
    }

    public long getServerId() {
        return 0;
    }

    /**
     * If the underlying Zookeeper server support local session, this method
     * will set a isLocalSession to true if a request is associated with
     * a local session.
     *
     * @param si
     */
    protected void setLocalSessionFlag(Request si) {
        // no-op here; overridden where local sessions are supported
    }

    public void submitRequest(Request si) {
        enqueueRequest(si);
    }

    public void enqueueRequest(Request si) {
        if (requestThrottler == null) {
            synchronized (this) {
                try {
                    // wait for the request processor chain to be set up; the
                    // state moves past INITIAL after startup completes
                    while (state == State.INITIAL) {
                        wait(1000);
                    }
                } catch (InterruptedException e) {
                    LOG.warn("Unexpected interruption", e);
                }
                if (requestThrottler == null) {
                    throw new RuntimeException("Not started");
                }
            }
        }
        requestThrottler.submitRequest(si);
    }

    public void submitRequestNow(Request si) {
        if (firstProcessor == null) {
            synchronized (this) {
                try {
                    while (state == State.INITIAL) {
                        wait(1000);
                    }
                } catch (InterruptedException e) {
                    LOG.warn("Unexpected interruption", e);
                }
                if (firstProcessor == null || state != State.RUNNING) {
                    throw new RuntimeException("Not started");
                }
            }
        }
        try {
            touch(si.cnxn);
            boolean validpacket = Request.isValid(si.type);
            if (validpacket) {
                setLocalSessionFlag(si);
                firstProcessor.processRequest(si);
                if (si.cnxn != null) {
                    incInProcess();
                }
            } else {
                LOG.warn("Received packet at server of unknown type {}", si.type);
                // update request accounting/throttling limits
                requestFinished(si);
                new UnimplementedRequestProcessor().processRequest(si);
            }
        } catch (MissingSessionException e) {
            LOG.debug("Dropping request.", e);
            // update request accounting/throttling limits
            requestFinished(si);
        } catch (RequestProcessorException e) {
            LOG.error("Unable to process request", e);
            // update request accounting/throttling limits
            requestFinished(si);
        }
    }

    /** Returns the configured snapCount, clamped to a minimum of 2. */
    public static int getSnapCount() {
        int snapCount = Integer.getInteger(SNAP_COUNT, DEFAULT_SNAP_COUNT);
        if (snapCount < 2) {
            LOG.warn("SnapCount should be 2 or more. Now, snapCount is reset to 2");
            snapCount = 2;
        }
        return snapCount;
    }

    public int getGlobalOutstandingLimit() {
        return Integer.getInteger(GLOBAL_OUTSTANDING_LIMIT, DEFAULT_GLOBAL_OUTSTANDING_LIMIT);
    }

    /** Snapshot size limit in bytes; non-positive configured value disables the feature. */
    public static long getSnapSizeInBytes() {
        long size = Long.getLong("zookeeper.snapSizeLimitInKb", 4194304L);
        if (size <= 0) {
            LOG.info("zookeeper.snapSizeLimitInKb set to a non-positive value {}; disabling feature", size);
        }
        return size * 1024;
    }

    public void setServerCnxnFactory(ServerCnxnFactory factory) {
        serverCnxnFactory = factory;
    }

    public ServerCnxnFactory getServerCnxnFactory() {
        return serverCnxnFactory;
    }

    public ServerCnxnFactory getSecureServerCnxnFactory() {
        return secureServerCnxnFactory;
    }

    public void setSecureServerCnxnFactory(ServerCnxnFactory factory) {
        secureServerCnxnFactory = factory;
    }

    /**
     * return the last processed id from the
     * datatree
     */
    public long getLastProcessedZxid() {
        return zkDb.getDataTreeLastProcessedZxid();
    }

    /**
     * return the outstanding requests
     * in the queue, which haven't been
     * processed yet
     */
    public long getOutstandingRequests() {
        return getInProcess();
    }

    /**
     * return the total number of client connections that are alive
     * to this server
     */
    public int getNumAliveConnections() {
        int numAliveConnections = 0;

        if (serverCnxnFactory != null) {
            numAliveConnections += serverCnxnFactory.getNumAliveConnections();
        }
        if (secureServerCnxnFactory != null) {
            numAliveConnections += secureServerCnxnFactory.getNumAliveConnections();
        }

        return numAliveConnections;
    }

    /**
     * truncate the log to get in sync with others
     * if in a quorum
     * @param zxid the zxid that it needs to get in sync
     * with others
     * @throws IOException
     */
    public void truncateLog(long zxid) throws IOException {
        this.zkDb.truncateLog(zxid);
    }

    public int getTickTime() {
        return tickTime;
    }

    public void setTickTime(int tickTime) {
        LOG.info("tickTime set to {} ms", tickTime);
        this.tickTime = tickTime;
    }

    public static int getThrottledOpWaitTime() {
        return throttledOpWaitTime;
    }

    public static void setThrottledOpWaitTime(int time) {
        LOG.info("throttledOpWaitTime set to {} ms", time);
        throttledOpWaitTime = time;
    }

    public int getMinSessionTimeout() {
        return minSessionTimeout;
    }

    public void setMinSessionTimeout(int min) {
        // -1 means "use default": twice the tick time
        this.minSessionTimeout = min == -1 ? tickTime * 2 : min;
        LOG.info("minSessionTimeout set to {} ms", this.minSessionTimeout);
    }

    public int getMaxSessionTimeout() {
        return maxSessionTimeout;
    }

    public void setMaxSessionTimeout(int max) {
        // -1 means "use default": twenty times the tick time
        this.maxSessionTimeout = max == -1 ? tickTime * 20 : max;
        LOG.info("maxSessionTimeout set to {} ms", this.maxSessionTimeout);
    }

    public int getClientPortListenBacklog() {
        return listenBacklog;
    }

    public void setClientPortListenBacklog(int backlog) {
        this.listenBacklog = backlog;
        LOG.info("clientPortListenBacklog set to {}", backlog);
    }

    public int getClientPort() {
        return serverCnxnFactory != null ? serverCnxnFactory.getLocalPort() : -1;
    }

    public int getSecureClientPort() {
        return secureServerCnxnFactory != null ? secureServerCnxnFactory.getLocalPort() : -1;
    }

    /** Maximum number of connections allowed from particular host (ip) */
    public int getMaxClientCnxnsPerHost() {
        if (serverCnxnFactory != null) {
            return serverCnxnFactory.getMaxClientCnxnsPerHost();
        }
        if (secureServerCnxnFactory != null) {
            return secureServerCnxnFactory.getMaxClientCnxnsPerHost();
        }
        return -1;
    }

    public void setTxnLogFactory(FileTxnSnapLog txnLog) {
        this.txnLogFactory = txnLog;
    }

    public FileTxnSnapLog getTxnLogFactory() {
        return this.txnLogFactory;
    }

    /**
     * Returns the elapsed sync of time of transaction log in milliseconds.
*/ public long getTxnLogElapsedSyncTime() { return txnLogFactory.getTxnLogElapsedSyncTime(); } public String getState() { return "standalone"; } public void dumpEphemerals(PrintWriter pwriter) { zkDb.dumpEphemerals(pwriter); } public Map<Long, Set<String>> getEphemerals() { return zkDb.getEphemerals(); } public double getConnectionDropChance() { return connThrottle.getDropChance(); } public void processConnectRequest(ServerCnxn cnxn, ByteBuffer incomingBuffer) throws IOException, ClientCnxnLimitException { BinaryInputArchive bia = BinaryInputArchive.getArchive(new ByteBufferInputStream(incomingBuffer)); ConnectRequest connReq = new ConnectRequest(); connReq.deserialize(bia, "connect"); LOG.debug( "Session establishment request from client {} client's lastZxid is 0x{}", cnxn.getRemoteSocketAddress(), Long.toHexString(connReq.getLastZxidSeen())); long sessionId = connReq.getSessionId(); int tokensNeeded = 1; if (connThrottle.isConnectionWeightEnabled()) { if (sessionId == 0) { if (localSessionEnabled) { tokensNeeded = connThrottle.getRequiredTokensForLocal(); } else { tokensNeeded = connThrottle.getRequiredTokensForGlobal(); } } else { tokensNeeded = connThrottle.getRequiredTokensForRenew(); } } if (!connThrottle.checkLimit(tokensNeeded)) { throw new ClientCnxnLimitException(); } ServerMetrics.getMetrics().CONNECTION_TOKEN_DEFICIT.add(connThrottle.getDeficit()); ServerMetrics.getMetrics().CONNECTION_REQUEST_COUNT.add(1); boolean readOnly = false; try { readOnly = bia.readBool("readOnly"); cnxn.isOldClient = false; } catch (IOException e) { LOG.warn( "Connection request from old client {}; will be dropped if server is in r-o mode", cnxn.getRemoteSocketAddress()); } if (!readOnly && this instanceof ReadOnlyZooKeeperServer) { String msg = "Refusing session request for not-read-only client " + cnxn.getRemoteSocketAddress(); LOG.info(msg); throw new CloseRequestException(msg, ServerCnxn.DisconnectReason.NOT_READ_ONLY_CLIENT); } if (connReq.getLastZxidSeen() > 
zkDb.dataTree.lastProcessedZxid) { String msg = "Refusing session request for client " + cnxn.getRemoteSocketAddress() + " as it has seen zxid 0x" + Long.toHexString(connReq.getLastZxidSeen()) + " our last zxid is 0x" + Long.toHexString(getZKDatabase().getDataTreeLastProcessedZxid()) + " client must try another server"; LOG.info(msg); throw new CloseRequestException(msg, ServerCnxn.DisconnectReason.CLIENT_ZXID_AHEAD); } int sessionTimeout = connReq.getTimeOut(); byte[] passwd = connReq.getPasswd(); int minSessionTimeout = getMinSessionTimeout(); if (sessionTimeout < minSessionTimeout) { sessionTimeout = minSessionTimeout; } int maxSessionTimeout = getMaxSessionTimeout(); if (sessionTimeout > maxSessionTimeout) { sessionTimeout = maxSessionTimeout; } cnxn.setSessionTimeout(sessionTimeout); cnxn.disableRecv(); if (sessionId == 0) { long id = createSession(cnxn, passwd, sessionTimeout); LOG.debug( "Client attempting to establish new session: session = 0x{}, zxid = 0x{}, timeout = {}, address = {}", Long.toHexString(id), Long.toHexString(connReq.getLastZxidSeen()), connReq.getTimeOut(), cnxn.getRemoteSocketAddress()); } else { validateSession(cnxn, sessionId); LOG.debug( "Client attempting to renew session: session = 0x{}, zxid = 0x{}, timeout = {}, address = {}", Long.toHexString(sessionId), Long.toHexString(connReq.getLastZxidSeen()), connReq.getTimeOut(), cnxn.getRemoteSocketAddress()); if (serverCnxnFactory != null) { serverCnxnFactory.closeSession(sessionId, ServerCnxn.DisconnectReason.CLIENT_RECONNECT); } if (secureServerCnxnFactory != null) { secureServerCnxnFactory.closeSession(sessionId, ServerCnxn.DisconnectReason.CLIENT_RECONNECT); } cnxn.setSessionId(sessionId); reopenSession(cnxn, sessionId, passwd, sessionTimeout); ServerMetrics.getMetrics().CONNECTION_REVALIDATE_COUNT.add(1); } } /** * Validate if a particular session can be reestablished. 
* * @param cnxn * @param sessionId */ protected void validateSession(ServerCnxn cnxn, long sessionId) throws IOException { } public boolean shouldThrottle(long outStandingCount) { int globalOutstandingLimit = getGlobalOutstandingLimit(); if (globalOutstandingLimit < getInflight() || globalOutstandingLimit < getInProcess()) { return outStandingCount > 0; } return false; } long getFlushDelay() { return flushDelay; } static void setFlushDelay(long delay) { LOG.info("{} = {} ms", FLUSH_DELAY, delay); flushDelay = delay; } long getMaxWriteQueuePollTime() { return maxWriteQueuePollTime; } static void setMaxWriteQueuePollTime(long maxTime) { LOG.info("{} = {} ms", MAX_WRITE_QUEUE_POLL_SIZE, maxTime); maxWriteQueuePollTime = maxTime; } int getMaxBatchSize() { return maxBatchSize; } static void setMaxBatchSize(int size) { LOG.info("{}={}", MAX_BATCH_SIZE, size); maxBatchSize = size; } private void initLargeRequestThrottlingSettings() { setLargeRequestMaxBytes(Integer.getInteger("zookeeper.largeRequestMaxBytes", largeRequestMaxBytes)); setLargeRequestThreshold(Integer.getInteger("zookeeper.largeRequestThreshold", -1)); } public int getLargeRequestMaxBytes() { return largeRequestMaxBytes; } public void setLargeRequestMaxBytes(int bytes) { if (bytes <= 0) { LOG.warn("Invalid max bytes for all large requests {}. It should be a positive number.", bytes); LOG.warn("Will not change the setting. The max bytes stay at {}", largeRequestMaxBytes); } else { largeRequestMaxBytes = bytes; LOG.info("The max bytes for all large requests are set to {}", largeRequestMaxBytes); } } public int getLargeRequestThreshold() { return largeRequestThreshold; } public void setLargeRequestThreshold(int threshold) { if (threshold == 0 || threshold < -1) { LOG.warn("Invalid large request threshold {}. It should be -1 or positive. 
Setting to -1 ", threshold); largeRequestThreshold = -1; } else { largeRequestThreshold = threshold; LOG.info("The large request threshold is set to {}", largeRequestThreshold); } } public int getLargeRequestBytes() { return currentLargeRequestBytes.get(); } private boolean isLargeRequest(int length) { if (largeRequestThreshold == -1) { return false; } return length > largeRequestThreshold; } public boolean checkRequestSizeWhenReceivingMessage(int length) throws IOException { if (!isLargeRequest(length)) { return true; } if (currentLargeRequestBytes.get() + length <= largeRequestMaxBytes) { return true; } else { ServerMetrics.getMetrics().LARGE_REQUESTS_REJECTED.add(1); throw new IOException("Rejecting large request"); } } private boolean checkRequestSizeWhenMessageReceived(int length) throws IOException { if (!isLargeRequest(length)) { return true; } int bytes = currentLargeRequestBytes.addAndGet(length); if (bytes > largeRequestMaxBytes) { currentLargeRequestBytes.addAndGet(-length); ServerMetrics.getMetrics().LARGE_REQUESTS_REJECTED.add(1); throw new IOException("Rejecting large request"); } return true; } public void requestFinished(Request request) { int largeRequestLength = request.getLargeRequestSize(); if (largeRequestLength != -1) { currentLargeRequestBytes.addAndGet(-largeRequestLength); } } public void processPacket(ServerCnxn cnxn, ByteBuffer incomingBuffer) throws IOException { InputStream bais = new ByteBufferInputStream(incomingBuffer); BinaryInputArchive bia = BinaryInputArchive.getArchive(bais); RequestHeader h = new RequestHeader(); h.deserialize(bia, "header"); cnxn.incrOutstandingAndCheckThrottle(h); incomingBuffer = incomingBuffer.slice(); if (h.getType() == OpCode.auth) { LOG.info("got auth packet {}", cnxn.getRemoteSocketAddress()); AuthPacket authPacket = new AuthPacket(); ByteBufferInputStream.byteBuffer2Record(incomingBuffer, authPacket); String scheme = authPacket.getScheme(); ServerAuthenticationProvider ap = 
ProviderRegistry.getServerProvider(scheme); Code authReturn = Code.AUTHFAILED; if (ap != null) { try { authReturn = ap.handleAuthentication( new ServerAuthenticationProvider.ServerObjs(this, cnxn), authPacket.getAuth()); } catch (RuntimeException e) { LOG.warn("Caught runtime exception from AuthenticationProvider: {}", scheme, e); authReturn = Code.AUTHFAILED; } } if (authReturn == Code.OK) { LOG.info("Session 0x{}: auth success for scheme {} and address {}", Long.toHexString(cnxn.getSessionId()), scheme, cnxn.getRemoteSocketAddress()); ReplyHeader rh = new ReplyHeader(h.getXid(), 0, Code.OK.intValue()); cnxn.sendResponse(rh, null, null); } else { if (ap == null) { LOG.warn( "No authentication provider for scheme: {} has {}", scheme, ProviderRegistry.listProviders()); } else { LOG.warn("Authentication failed for scheme: {}", scheme); } ReplyHeader rh = new ReplyHeader(h.getXid(), 0, Code.AUTHFAILED.intValue()); cnxn.sendResponse(rh, null, null); cnxn.sendBuffer(ServerCnxnFactory.closeConn); cnxn.disableRecv(); } return; } else if (h.getType() == OpCode.sasl) { processSasl(incomingBuffer, cnxn, h); } else { if (!authHelper.enforceAuthentication(cnxn, h.getXid())) { return; } else { Request si = new Request(cnxn, cnxn.getSessionId(), h.getXid(), h.getType(), incomingBuffer, cnxn.getAuthInfo()); int length = incomingBuffer.limit(); if (isLargeRequest(length)) { checkRequestSizeWhenMessageReceived(length); si.setLargeRequestSize(length); } si.setOwner(ServerCnxn.me); submitRequest(si); } } } private static boolean isSaslSuperUser(String id) { if (id == null || id.isEmpty()) { return false; } Properties properties = System.getProperties(); int prefixLen = SASL_SUPER_USER.length(); for (String k : properties.stringPropertyNames()) { if (k.startsWith(SASL_SUPER_USER) && (k.length() == prefixLen || k.charAt(prefixLen) == '.')) { String value = properties.getProperty(k); if (value != null && value.equals(id)) { return true; } } } return false; } private static boolean 
shouldAllowSaslFailedClientsConnect() { return Boolean.getBoolean(ALLOW_SASL_FAILED_CLIENTS); } private void processSasl(ByteBuffer incomingBuffer, ServerCnxn cnxn, RequestHeader requestHeader) throws IOException { LOG.debug("Responding to client SASL token."); GetSASLRequest clientTokenRecord = new GetSASLRequest(); ByteBufferInputStream.byteBuffer2Record(incomingBuffer, clientTokenRecord); byte[] clientToken = clientTokenRecord.getToken(); LOG.debug("Size of client SASL token: {}", clientToken.length); byte[] responseToken = null; try { ZooKeeperSaslServer saslServer = cnxn.zooKeeperSaslServer; try { responseToken = saslServer.evaluateResponse(clientToken); if (saslServer.isComplete()) { String authorizationID = saslServer.getAuthorizationID(); LOG.info("Session 0x{}: adding SASL authorization for authorizationID: {}", Long.toHexString(cnxn.getSessionId()), authorizationID); cnxn.addAuthInfo(new Id("sasl", authorizationID)); if (isSaslSuperUser(authorizationID)) { cnxn.addAuthInfo(new Id("super", "")); LOG.info( "Session 0x{}: Authenticated Id '{}' as super user", Long.toHexString(cnxn.getSessionId()), authorizationID); } } } catch (SaslException e) { LOG.warn("Client {} failed to SASL authenticate: {}", cnxn.getRemoteSocketAddress(), e); if (shouldAllowSaslFailedClientsConnect() && !authHelper.isSaslAuthRequired()) { LOG.warn("Maintaining client connection despite SASL authentication failure."); } else { int error; if (authHelper.isSaslAuthRequired()) { LOG.warn( "Closing client connection due to server requires client SASL authenticaiton," + "but client SASL authentication has failed, or client is not configured with SASL " + "authentication."); error = Code.SESSIONCLOSEDREQUIRESASLAUTH.intValue(); } else { LOG.warn("Closing client connection due to SASL authentication failure."); error = Code.AUTHFAILED.intValue(); } ReplyHeader replyHeader = new ReplyHeader(requestHeader.getXid(), 0, error); cnxn.sendResponse(replyHeader, new SetSASLResponse(null), 
"response"); cnxn.sendCloseSession(); cnxn.disableRecv(); return; } } } catch (NullPointerException e) { LOG.error("cnxn.saslServer is null: cnxn object did not initialize its saslServer properly."); } if (responseToken != null) { LOG.debug("Size of server SASL response: {}", responseToken.length); } ReplyHeader replyHeader = new ReplyHeader(requestHeader.getXid(), 0, Code.OK.intValue()); Record record = new SetSASLResponse(responseToken); cnxn.sendResponse(replyHeader, record, "response"); } public ProcessTxnResult processTxn(TxnHeader hdr, Record txn) { processTxnForSessionEvents(null, hdr, txn); return processTxnInDB(hdr, txn, null); } public ProcessTxnResult processTxn(Request request) { TxnHeader hdr = request.getHdr(); processTxnForSessionEvents(request, hdr, request.getTxn()); final boolean writeRequest = (hdr != null); final boolean quorumRequest = request.isQuorum(); if (!writeRequest && !quorumRequest) { return new ProcessTxnResult(); } synchronized (outstandingChanges) { ProcessTxnResult rc = processTxnInDB(hdr, request.getTxn(), request.getTxnDigest()); if (writeRequest) { long zxid = hdr.getZxid(); while (!outstandingChanges.isEmpty() && outstandingChanges.peek().zxid <= zxid) { ChangeRecord cr = outstandingChanges.remove(); ServerMetrics.getMetrics().OUTSTANDING_CHANGES_REMOVED.add(1); if (cr.zxid < zxid) { LOG.warn( "Zxid outstanding 0x{} is less than current 0x{}", Long.toHexString(cr.zxid), Long.toHexString(zxid)); } if (outstandingChangesForPath.get(cr.path) == cr) { outstandingChangesForPath.remove(cr.path); } } } if (quorumRequest) { getZKDatabase().addCommittedProposal(request); } return rc; } } private void processTxnForSessionEvents(Request request, TxnHeader hdr, Record txn) { int opCode = (request == null) ? hdr.getType() : request.type; long sessionId = (request == null) ? 
hdr.getClientId() : request.sessionId; if (opCode == OpCode.createSession) { if (hdr != null && txn instanceof CreateSessionTxn) { CreateSessionTxn cst = (CreateSessionTxn) txn; sessionTracker.commitSession(sessionId, cst.getTimeOut()); } else if (request == null || !request.isLocalSession()) { LOG.warn("*****>>>>> Got {} {}", txn.getClass(), txn.toString()); } } else if (opCode == OpCode.closeSession) { sessionTracker.removeSession(sessionId); } } private ProcessTxnResult processTxnInDB(TxnHeader hdr, Record txn, TxnDigest digest) { if (hdr == null) { return new ProcessTxnResult(); } else { return getZKDatabase().processTxn(hdr, txn, digest); } } public Map<Long, Set<Long>> getSessionExpiryMap() { return sessionTracker.getSessionExpiryMap(); } /** * This method is used to register the ZooKeeperServerShutdownHandler to get * server's error or shutdown state change notifications. * {@link ZooKeeperServerShutdownHandler * every server state changes {@link * * @param zkShutdownHandler shutdown handler */ void registerServerShutdownHandler(ZooKeeperServerShutdownHandler zkShutdownHandler) { this.zkShutdownHandler = zkShutdownHandler; } public boolean isResponseCachingEnabled() { return isResponseCachingEnabled; } public void setResponseCachingEnabled(boolean isEnabled) { isResponseCachingEnabled = isEnabled; } public ResponseCache getReadResponseCache() { return isResponseCachingEnabled ? readResponseCache : null; } public ResponseCache getGetChildrenResponseCache() { return isResponseCachingEnabled ? 
getChildrenResponseCache : null; } protected void registerMetrics() { MetricsContext rootContext = ServerMetrics.getMetrics().getMetricsProvider().getRootContext(); final ZKDatabase zkdb = this.getZKDatabase(); final ServerStats stats = this.serverStats(); rootContext.registerGauge("avg_latency", stats::getAvgLatency); rootContext.registerGauge("max_latency", stats::getMaxLatency); rootContext.registerGauge("min_latency", stats::getMinLatency); rootContext.registerGauge("packets_received", stats::getPacketsReceived); rootContext.registerGauge("packets_sent", stats::getPacketsSent); rootContext.registerGauge("num_alive_connections", stats::getNumAliveClientConnections); rootContext.registerGauge("outstanding_requests", stats::getOutstandingRequests); rootContext.registerGauge("uptime", stats::getUptime); rootContext.registerGauge("znode_count", zkdb::getNodeCount); rootContext.registerGauge("watch_count", zkdb.getDataTree()::getWatchCount); rootContext.registerGauge("ephemerals_count", zkdb.getDataTree()::getEphemeralsCount); rootContext.registerGauge("approximate_data_size", zkdb.getDataTree()::cachedApproximateDataSize); rootContext.registerGauge("global_sessions", zkdb::getSessionCount); rootContext.registerGauge("local_sessions", this.getSessionTracker()::getLocalSessionCount); OSMXBean osMbean = new OSMXBean(); rootContext.registerGauge("open_file_descriptor_count", osMbean::getOpenFileDescriptorCount); rootContext.registerGauge("max_file_descriptor_count", osMbean::getMaxFileDescriptorCount); rootContext.registerGauge("connection_drop_probability", this::getConnectionDropChance); rootContext.registerGauge("last_client_response_size", stats.getClientResponseStats()::getLastBufferSize); rootContext.registerGauge("max_client_response_size", stats.getClientResponseStats()::getMaxBufferSize); rootContext.registerGauge("min_client_response_size", stats.getClientResponseStats()::getMinBufferSize); rootContext.registerGauge("outstanding_tls_handshake", 
this::getOutstandingHandshakeNum); rootContext.registerGauge("auth_failed_count", stats::getAuthFailedCount); rootContext.registerGauge("non_mtls_remote_conn_count", stats::getNonMTLSRemoteConnCount); rootContext.registerGauge("non_mtls_local_conn_count", stats::getNonMTLSLocalConnCount); rootContext.registerGaugeSet(QuotaMetricsUtils.QUOTA_COUNT_LIMIT_PER_NAMESPACE, () -> QuotaMetricsUtils.getQuotaCountLimit(zkDb.getDataTree())); rootContext.registerGaugeSet(QuotaMetricsUtils.QUOTA_BYTES_LIMIT_PER_NAMESPACE, () -> QuotaMetricsUtils.getQuotaBytesLimit(zkDb.getDataTree())); rootContext.registerGaugeSet(QuotaMetricsUtils.QUOTA_COUNT_USAGE_PER_NAMESPACE, () -> QuotaMetricsUtils.getQuotaCountUsage(zkDb.getDataTree())); rootContext.registerGaugeSet(QuotaMetricsUtils.QUOTA_BYTES_USAGE_PER_NAMESPACE, () -> QuotaMetricsUtils.getQuotaBytesUsage(zkDb.getDataTree())); } protected void unregisterMetrics() { MetricsContext rootContext = ServerMetrics.getMetrics().getMetricsProvider().getRootContext(); rootContext.unregisterGauge("avg_latency"); rootContext.unregisterGauge("max_latency"); rootContext.unregisterGauge("min_latency"); rootContext.unregisterGauge("packets_received"); rootContext.unregisterGauge("packets_sent"); rootContext.unregisterGauge("num_alive_connections"); rootContext.unregisterGauge("outstanding_requests"); rootContext.unregisterGauge("uptime"); rootContext.unregisterGauge("znode_count"); rootContext.unregisterGauge("watch_count"); rootContext.unregisterGauge("ephemerals_count"); rootContext.unregisterGauge("approximate_data_size"); rootContext.unregisterGauge("global_sessions"); rootContext.unregisterGauge("local_sessions"); rootContext.unregisterGauge("open_file_descriptor_count"); rootContext.unregisterGauge("max_file_descriptor_count"); rootContext.unregisterGauge("connection_drop_probability"); rootContext.unregisterGauge("last_client_response_size"); rootContext.unregisterGauge("max_client_response_size"); 
rootContext.unregisterGauge("min_client_response_size"); rootContext.unregisterGauge("auth_failed_count"); rootContext.unregisterGauge("non_mtls_remote_conn_count"); rootContext.unregisterGauge("non_mtls_local_conn_count"); rootContext.unregisterGaugeSet(QuotaMetricsUtils.QUOTA_COUNT_LIMIT_PER_NAMESPACE); rootContext.unregisterGaugeSet(QuotaMetricsUtils.QUOTA_BYTES_LIMIT_PER_NAMESPACE); rootContext.unregisterGaugeSet(QuotaMetricsUtils.QUOTA_COUNT_USAGE_PER_NAMESPACE); rootContext.unregisterGaugeSet(QuotaMetricsUtils.QUOTA_BYTES_USAGE_PER_NAMESPACE); } /** * Hook into admin server, useful to expose additional data * that do not represent metrics. * * @param response a sink which collects the data. */ public void dumpMonitorValues(BiConsumer<String, Object> response) { ServerStats stats = serverStats(); response.accept("version", Version.getFullVersion()); response.accept("server_state", stats.getServerState()); } /** * Grant or deny authorization to an operation on a node as a function of: * @param cnxn : the server connection * @param acl : set of ACLs for the node * @param perm : the permission that the client is requesting * @param ids : the credentials supplied by the client * @param path : the ZNode path * @param setAcls : for set ACL operations, the list of ACLs being set. Otherwise null. 
*/ public void checkACL(ServerCnxn cnxn, List<ACL> acl, int perm, List<Id> ids, String path, List<ACL> setAcls) throws KeeperException.NoAuthException { if (skipACL) { return; } LOG.debug("Permission requested: {} ", perm); LOG.debug("ACLs for node: {}", acl); LOG.debug("Client credentials: {}", ids); if (acl == null || acl.size() == 0) { return; } for (Id authId : ids) { if (authId.getScheme().equals("super")) { return; } } for (ACL a : acl) { Id id = a.getId(); if ((a.getPerms() & perm) != 0) { if (id.getScheme().equals("world") && id.getId().equals("anyone")) { return; } ServerAuthenticationProvider ap = ProviderRegistry.getServerProvider(id.getScheme()); if (ap != null) { for (Id authId : ids) { if (authId.getScheme().equals(id.getScheme()) && ap.matches( new ServerAuthenticationProvider.ServerObjs(this, cnxn), new ServerAuthenticationProvider.MatchValues(path, authId.getId(), id.getId(), perm, setAcls))) { return; } } } } } throw new KeeperException.NoAuthException(); } /** * check a path whether exceeded the quota. * * @param path * the path of the node, used for the quota prefix check * @param lastData * the current node data, {@code null} for none * @param data * the data to be set, or {@code null} for none * @param type * currently, create and setData need to check quota */ public void checkQuota(String path, byte[] lastData, byte[] data, int type) throws KeeperException.QuotaExceededException { if (!enforceQuota) { return; } long dataBytes = (data == null) ? 0 : data.length; ZKDatabase zkDatabase = getZKDatabase(); String lastPrefix = zkDatabase.getDataTree().getMaxPrefixWithQuota(path); if (StringUtils.isEmpty(lastPrefix)) { return; } final String namespace = PathUtils.getTopNamespace(path); switch (type) { case OpCode.create: checkQuota(lastPrefix, dataBytes, 1, namespace); break; case OpCode.setData: checkQuota(lastPrefix, dataBytes - (lastData == null ? 
0 : lastData.length), 0, namespace); break; default: throw new IllegalArgumentException("Unsupported OpCode for checkQuota: " + type); } } /** * check a path whether exceeded the quota. * * @param lastPrefix the path of the node which has a quota. * @param bytesDiff * the diff to be added to number of bytes * @param countDiff * the diff to be added to the count * @param namespace * the namespace for collecting quota exceeded errors */ private void checkQuota(String lastPrefix, long bytesDiff, long countDiff, String namespace) throws KeeperException.QuotaExceededException { LOG.debug("checkQuota: lastPrefix={}, bytesDiff={}, countDiff={}", lastPrefix, bytesDiff, countDiff); String limitNode = Quotas.limitPath(lastPrefix); DataNode node = getZKDatabase().getNode(limitNode); StatsTrack limitStats; if (node == null) { LOG.error("Missing limit node for quota {}", limitNode); return; } synchronized (node) { limitStats = new StatsTrack(node.data); } boolean checkCountQuota = countDiff != 0 && (limitStats.getCount() > -1 || limitStats.getCountHardLimit() > -1); boolean checkByteQuota = bytesDiff != 0 && (limitStats.getBytes() > -1 || limitStats.getByteHardLimit() > -1); if (!checkCountQuota && !checkByteQuota) { return; } String statNode = Quotas.statPath(lastPrefix); node = getZKDatabase().getNode(statNode); StatsTrack currentStats; if (node == null) { LOG.error("Missing node for stat {}", statNode); return; } synchronized (node) { currentStats = new StatsTrack(node.data); } if (checkCountQuota) { long newCount = currentStats.getCount() + countDiff; boolean isCountHardLimit = limitStats.getCountHardLimit() > -1; long countLimit = isCountHardLimit ? limitStats.getCountHardLimit() : limitStats.getCount(); if (newCount > countLimit) { String msg = "Quota exceeded: " + lastPrefix + " [current count=" + newCount + ", " + (isCountHardLimit ? 
"hard" : "soft") + "CountLimit=" + countLimit + "]"; RATE_LOGGER.rateLimitLog(msg); if (isCountHardLimit) { updateQuotaExceededMetrics(namespace); throw new KeeperException.QuotaExceededException(lastPrefix); } } } if (checkByteQuota) { long newBytes = currentStats.getBytes() + bytesDiff; boolean isByteHardLimit = limitStats.getByteHardLimit() > -1; long byteLimit = isByteHardLimit ? limitStats.getByteHardLimit() : limitStats.getBytes(); if (newBytes > byteLimit) { String msg = "Quota exceeded: " + lastPrefix + " [current bytes=" + newBytes + ", " + (isByteHardLimit ? "hard" : "soft") + "ByteLimit=" + byteLimit + "]"; RATE_LOGGER.rateLimitLog(msg); if (isByteHardLimit) { updateQuotaExceededMetrics(namespace); throw new KeeperException.QuotaExceededException(lastPrefix); } } } } public static boolean isDigestEnabled() { return digestEnabled; } public static void setDigestEnabled(boolean digestEnabled) { LOG.info("{} = {}", ZOOKEEPER_DIGEST_ENABLED, digestEnabled); ZooKeeperServer.digestEnabled = digestEnabled; } /** * Trim a path to get the immediate predecessor. * * @param path * @return * @throws KeeperException.BadArgumentsException */ private String parentPath(String path) throws KeeperException.BadArgumentsException { int lastSlash = path.lastIndexOf('/'); if (lastSlash == -1 || path.indexOf('\0') != -1 || getZKDatabase().isSpecialPath(path)) { throw new KeeperException.BadArgumentsException(path); } return lastSlash == 0 ? 
"/" : path.substring(0, lastSlash); } private String effectiveACLPath(Request request) throws KeeperException.BadArgumentsException, KeeperException.InvalidACLException { boolean mustCheckACL = false; String path = null; List<ACL> acl = null; switch (request.type) { case OpCode.create: case OpCode.create2: { CreateRequest req = new CreateRequest(); if (buffer2Record(request.request, req)) { mustCheckACL = true; acl = req.getAcl(); path = parentPath(req.getPath()); } break; } case OpCode.delete: { DeleteRequest req = new DeleteRequest(); if (buffer2Record(request.request, req)) { path = parentPath(req.getPath()); } break; } case OpCode.setData: { SetDataRequest req = new SetDataRequest(); if (buffer2Record(request.request, req)) { path = req.getPath(); } break; } case OpCode.setACL: { SetACLRequest req = new SetACLRequest(); if (buffer2Record(request.request, req)) { mustCheckACL = true; acl = req.getAcl(); path = req.getPath(); } break; } } if (mustCheckACL) { /* we ignore the extrapolated ACL returned by fixupACL because * we only care about it being well-formed (and if it isn't, an * exception will be raised). */ PrepRequestProcessor.fixupACL(path, request.authInfo, acl); } return path; } private int effectiveACLPerms(Request request) { switch (request.type) { case OpCode.create: case OpCode.create2: return ZooDefs.Perms.CREATE; case OpCode.delete: return ZooDefs.Perms.DELETE; case OpCode.setData: return ZooDefs.Perms.WRITE; case OpCode.setACL: return ZooDefs.Perms.ADMIN; default: return ZooDefs.Perms.ALL; } } /** * Check Write Requests for Potential Access Restrictions * <p> * Before a request is being proposed to the quorum, lets check it * against local ACLs. Non-write requests (read, session, etc.) * are passed along. Invalid requests are sent a response. * <p> * While we are at it, if the request will set an ACL: make sure it's * a valid one. * * @param request * @return true if request is permitted, false if not. 
*/ public boolean authWriteRequest(Request request) { int err; String pathToCheck; if (!enableEagerACLCheck) { return true; } err = Code.OK.intValue(); try { pathToCheck = effectiveACLPath(request); if (pathToCheck != null) { checkACL(request.cnxn, zkDb.getACL(pathToCheck, null), effectiveACLPerms(request), request.authInfo, pathToCheck, null); } } catch (KeeperException.NoAuthException e) { LOG.debug("Request failed ACL check", e); err = e.code().intValue(); } catch (KeeperException.InvalidACLException e) { LOG.debug("Request has an invalid ACL check", e); err = e.code().intValue(); } catch (KeeperException.NoNodeException e) { LOG.debug("ACL check against non-existent node: {}", e.getMessage()); } catch (KeeperException.BadArgumentsException e) { LOG.debug("ACL check against illegal node path: {}", e.getMessage()); } catch (Throwable t) { LOG.error("Uncaught exception in authWriteRequest with: ", t); throw t; } finally { if (err != Code.OK.intValue()) { /* This request has a bad ACL, so we are dismissing it early. 
*/ decInProcess(); ReplyHeader rh = new ReplyHeader(request.cxid, 0, err); try { request.cnxn.sendResponse(rh, null, null); } catch (IOException e) { LOG.error("IOException : {}", e); } } } return err == Code.OK.intValue(); } private boolean buffer2Record(ByteBuffer request, Record record) { boolean rv = false; try { ByteBufferInputStream.byteBuffer2Record(request, record); request.rewind(); rv = true; } catch (IOException ex) { } return rv; } public int getOutstandingHandshakeNum() { if (serverCnxnFactory instanceof NettyServerCnxnFactory) { return ((NettyServerCnxnFactory) serverCnxnFactory).getOutstandingHandshakeNum(); } else { return 0; } } public boolean isReconfigEnabled() { return this.reconfigEnabled; } public ZooKeeperServerShutdownHandler getZkShutdownHandler() { return zkShutdownHandler; } static void updateQuotaExceededMetrics(final String namespace) { if (namespace == null) { return; } ServerMetrics.getMetrics().QUOTA_EXCEEDED_ERROR_PER_NAMESPACE.add(namespace, 1); } }
Why is the exit code ignored here? If `systemctl show` fails, this method silently returns the command's error output as though it were the property value; the exit code should be checked so the failure surfaces to the caller.
/**
 * Reads a single property of a systemd unit via {@code systemctl show}.
 *
 * <p>The exit code is intentionally NOT ignored: with the default exit-code
 * check in place, a failing {@code systemctl} invocation surfaces as an error
 * instead of having its (error) output returned as if it were the property
 * value.
 *
 * @param context  task context used to construct the command line
 * @param unit     unit name without the {@code .service} suffix, e.g. "docker"
 * @param property systemd property name, e.g. "ActiveState"
 * @return the property value as printed by {@code systemctl show --value}
 */
public String getServiceProperty(TaskContext context, String unit, String property) {
    return newCommandLine(context)
            .add("systemctl", "show", "--property", property, "--value", unit + ".service")
            .execute()
            .getOutput();
}
.ignoreExitCode()
/**
 * Queries {@code systemctl show} for one property of the given unit.
 *
 * @param context  task context the command line is created for
 * @param unit     unit name, without the {@code .service} suffix
 * @param property name of the systemd property to read, e.g. "ActiveState"
 * @return the value printed by {@code systemctl show --value}
 */
public String getServiceProperty(TaskContext context, String unit, String property) {
    String service = unit + ".service";
    var commandLine = newCommandLine(context);
    // add() mutates the command line in place; the default exit-code check stays enabled.
    commandLine.add("systemctl", "show", "--property", property, "--value", service);
    return commandLine.execute().getOutput();
}
class SystemCtl { private static final Pattern PROPERTY_NAME_PATTERN = Pattern.compile("^[a-zA-Z]+$"); private static final Pattern UNIT_FILES_LISTED_PATTERN = Pattern.compile("([0-9]+) unit files listed\\."); private static final Pattern ACTIVE_STATE_PROPERTY_PATTERN = createPropertyPattern("ActiveState"); private final Terminal terminal; private boolean useSudo = false; private static Pattern createPropertyPattern(String propertyName) { if (!PROPERTY_NAME_PATTERN.matcher(propertyName).matches()) { throw new IllegalArgumentException("Property name does not match " + PROPERTY_NAME_PATTERN); } String regex = String.format("(?md)^%s=(.*)$", propertyName); return Pattern.compile(regex); } public SystemCtl(Terminal terminal) { this.terminal = terminal; } /** Call all commands through sudo */ public SystemCtl withSudo() { this.useSudo = true; return this; } /** Returns whether this is configured to use sudo */ public boolean useSudo() { return useSudo; } public void daemonReload(TaskContext taskContext) { newCommandLine(taskContext).add("systemctl", "daemon-reload") .execute(); } public SystemCtlEnable enable(String unit) { return new SystemCtlEnable(unit); } public SystemCtlDisable disable(String unit) { return new SystemCtlDisable(unit); } public SystemCtlStart start(String unit) { return new SystemCtlStart(unit); } public SystemCtlStop stop(String unit) { return new SystemCtlStop(unit); } public SystemCtlRestart restart(String unit) { return new SystemCtlRestart(unit); } public boolean serviceExists(TaskContext context, String unit) { return newCommandLine(context) .add("systemctl", "list-unit-files", unit + ".service").executeSilently() .mapOutput(output -> { Matcher matcher = UNIT_FILES_LISTED_PATTERN.matcher(output); if (!matcher.find()) { throw new IllegalArgumentException(); } return !matcher.group(1).equals("0"); }); } /** Returns true if the unit exists and is active (i.e. running). unit is e.g. "docker". 
*/ public boolean isActive(TaskContext context, String unit) { return newCommandLine(context) .add("systemctl", "--quiet", "is-active", unit + ".service") .ignoreExitCode() .executeSilently() .map(CommandResult::getExitCode) == 0; } private CommandLine newCommandLine(TaskContext context) { var commandLine = terminal.newCommandLine(context); if (useSudo) { commandLine.add("sudo"); } return commandLine; } public class SystemCtlEnable extends SystemCtlCommand { private SystemCtlEnable(String unit) { super("enable", unit); } protected boolean isAlreadyConverged(TaskContext context) { return isUnitEnabled(context); } } public class SystemCtlDisable extends SystemCtlCommand { private SystemCtlDisable(String unit) { super("disable", unit); } protected boolean isAlreadyConverged(TaskContext context) { return !isUnitEnabled(context); } } public class SystemCtlStart extends SystemCtlCommand { private SystemCtlStart(String unit) { super("start", unit); } protected boolean isAlreadyConverged(TaskContext context) { String activeState = getSystemCtlProperty(context, ACTIVE_STATE_PROPERTY_PATTERN); return Objects.equals(activeState, "active"); } } public class SystemCtlStop extends SystemCtlCommand { private SystemCtlStop(String unit) { super("stop", unit); } protected boolean isAlreadyConverged(TaskContext context) { String activeState = getSystemCtlProperty(context, ACTIVE_STATE_PROPERTY_PATTERN); return Objects.equals(activeState, "inactive"); } } public class SystemCtlRestart extends SystemCtlCommand { private SystemCtlRestart(String unit) { super("restart", unit); } protected boolean isAlreadyConverged(TaskContext context) { return false; } } public abstract class SystemCtlCommand { private final String command; private final String unit; private SystemCtlCommand(String command, String unit) { this.command = command; this.unit = unit; } protected abstract boolean isAlreadyConverged(TaskContext context); public boolean converge(TaskContext context) { if 
(isAlreadyConverged(context)) { return false; } newCommandLine(context).add("systemctl", command, unit) .execute(); return true; } /** Returns true if unit is enabled */ boolean isUnitEnabled(TaskContext context) { return newCommandLine(context).add("systemctl", "--quiet", "is-enabled", unit) .ignoreExitCode() .executeSilently() .map(CommandResult::getExitCode) == 0; } /** * @param propertyPattern Pattern to match the output of systemctl show command with * exactly 1 group. The matching group must exist. * @return The matched group from the 'systemctl show' output. */ String getSystemCtlProperty(TaskContext context, Pattern propertyPattern) { return newCommandLine(context).add("systemctl", "show", unit) .executeSilently() .mapOutput(output -> extractProperty(output, propertyPattern)); } } /** * Find the systemd property value of the property (given by propertyPattern) * matching the 'systemctl show' output (given by showOutput). */ private static String extractProperty(String showOutput, Pattern propertyPattern) { Matcher matcher = propertyPattern.matcher(showOutput); if (!matcher.find()) { throw new IllegalArgumentException("Pattern '" + propertyPattern + "' didn't match output"); } else if (matcher.groupCount() != 1) { throw new IllegalArgumentException("Property pattern must have exactly 1 group"); } return matcher.group(1); } }
class SystemCtl { private static final Pattern PROPERTY_NAME_PATTERN = Pattern.compile("^[a-zA-Z]+$"); private static final Pattern UNIT_FILES_LISTED_PATTERN = Pattern.compile("([0-9]+) unit files listed\\."); private static final Pattern ACTIVE_STATE_PROPERTY_PATTERN = createPropertyPattern("ActiveState"); private final Terminal terminal; private boolean useSudo = false; private static Pattern createPropertyPattern(String propertyName) { if (!PROPERTY_NAME_PATTERN.matcher(propertyName).matches()) { throw new IllegalArgumentException("Property name does not match " + PROPERTY_NAME_PATTERN); } String regex = String.format("(?md)^%s=(.*)$", propertyName); return Pattern.compile(regex); } public SystemCtl(Terminal terminal) { this.terminal = terminal; } /** Call all commands through sudo */ public SystemCtl withSudo() { this.useSudo = true; return this; } /** Returns whether this is configured to use sudo */ public boolean useSudo() { return useSudo; } public void daemonReload(TaskContext taskContext) { newCommandLine(taskContext).add("systemctl", "daemon-reload") .execute(); } public SystemCtlEnable enable(String unit) { return new SystemCtlEnable(unit); } public SystemCtlDisable disable(String unit) { return new SystemCtlDisable(unit); } public SystemCtlStart start(String unit) { return new SystemCtlStart(unit); } public SystemCtlStop stop(String unit) { return new SystemCtlStop(unit); } public SystemCtlRestart restart(String unit) { return new SystemCtlRestart(unit); } public boolean serviceExists(TaskContext context, String unit) { return newCommandLine(context) .add("systemctl", "list-unit-files", unit + ".service").executeSilently() .mapOutput(output -> { Matcher matcher = UNIT_FILES_LISTED_PATTERN.matcher(output); if (!matcher.find()) { throw new IllegalArgumentException(); } return !matcher.group(1).equals("0"); }); } /** Returns true if the unit exists and is active (i.e. running). unit is e.g. "docker". 
*/ public boolean isActive(TaskContext context, String unit) { return newCommandLine(context) .add("systemctl", "--quiet", "is-active", unit + ".service") .ignoreExitCode() .executeSilently() .map(CommandResult::getExitCode) == 0; } private CommandLine newCommandLine(TaskContext context) { var commandLine = terminal.newCommandLine(context); if (useSudo) { commandLine.add("sudo"); } return commandLine; } public class SystemCtlEnable extends SystemCtlCommand { private SystemCtlEnable(String unit) { super("enable", unit); } protected boolean isAlreadyConverged(TaskContext context) { return isUnitEnabled(context); } } public class SystemCtlDisable extends SystemCtlCommand { private SystemCtlDisable(String unit) { super("disable", unit); } protected boolean isAlreadyConverged(TaskContext context) { return !isUnitEnabled(context); } } public class SystemCtlStart extends SystemCtlCommand { private SystemCtlStart(String unit) { super("start", unit); } protected boolean isAlreadyConverged(TaskContext context) { String activeState = getSystemCtlProperty(context, ACTIVE_STATE_PROPERTY_PATTERN); return Objects.equals(activeState, "active"); } } public class SystemCtlStop extends SystemCtlCommand { private SystemCtlStop(String unit) { super("stop", unit); } protected boolean isAlreadyConverged(TaskContext context) { String activeState = getSystemCtlProperty(context, ACTIVE_STATE_PROPERTY_PATTERN); return Objects.equals(activeState, "inactive"); } } public class SystemCtlRestart extends SystemCtlCommand { private SystemCtlRestart(String unit) { super("restart", unit); } protected boolean isAlreadyConverged(TaskContext context) { return false; } } public abstract class SystemCtlCommand { private final String command; private final String unit; private SystemCtlCommand(String command, String unit) { this.command = command; this.unit = unit; } protected abstract boolean isAlreadyConverged(TaskContext context); public boolean converge(TaskContext context) { if 
(isAlreadyConverged(context)) { return false; } newCommandLine(context).add("systemctl", command, unit) .execute(); return true; } /** Returns true if unit is enabled */ boolean isUnitEnabled(TaskContext context) { return newCommandLine(context).add("systemctl", "--quiet", "is-enabled", unit) .ignoreExitCode() .executeSilently() .map(CommandResult::getExitCode) == 0; } /** * @param propertyPattern Pattern to match the output of systemctl show command with * exactly 1 group. The matching group must exist. * @return The matched group from the 'systemctl show' output. */ String getSystemCtlProperty(TaskContext context, Pattern propertyPattern) { return newCommandLine(context).add("systemctl", "show", unit) .executeSilently() .mapOutput(output -> extractProperty(output, propertyPattern)); } } /** * Find the systemd property value of the property (given by propertyPattern) * matching the 'systemctl show' output (given by showOutput). */ private static String extractProperty(String showOutput, Pattern propertyPattern) { Matcher matcher = propertyPattern.matcher(showOutput); if (!matcher.find()) { throw new IllegalArgumentException("Pattern '" + propertyPattern + "' didn't match output"); } else if (matcher.groupCount() != 1) { throw new IllegalArgumentException("Property pattern must have exactly 1 group"); } return matcher.group(1); } }
Shouldn't this be XOR instead of OR, otherwise `softRebuild` is superset of `rebuild`?
public NodeList rebuilding(boolean soft) { return matching(node -> (soft || node.status().wantToRetire()) && node.status().wantToRebuild()); }
return matching(node -> (soft || node.status().wantToRetire()) && node.status().wantToRebuild());
public NodeList rebuilding(boolean soft) { return matching(node -> { if (soft) { return !node.status().wantToRetire() && node.status().wantToRebuild(); } return node.status().wantToRetire() && node.status().wantToRebuild(); }); }
class NodeList extends AbstractFilteringList<Node, NodeList> { protected NodeList(List<Node> nodes, boolean negate) { super(nodes, negate, NodeList::new); } /** Returns the node with the given hostname from this list, or empty if it is not present */ public Optional<Node> node(String hostname) { return matching(node -> node.hostname().equals(hostname)).first(); } /** Returns the subset of nodes which are retired */ public NodeList retired() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().retired()); } /** Returns the subset of nodes that are being deprovisioned */ public NodeList deprovisioning() { return matching(node -> node.status().wantToRetire() && node.status().wantToDeprovision()); } /** Returns the subset of nodes that are being rebuilt */ /** Returns the subset of nodes which are removable */ public NodeList removable() { return matching(node -> node.allocation().isPresent() && node.allocation().get().removable()); } /** Returns the subset of nodes which are reusable immediately after removal */ public NodeList reusable() { return matching(node -> node.allocation().isPresent() && node.allocation().get().reusable()); } /** Returns the subset of nodes having exactly the given resources */ public NodeList resources(NodeResources resources) { return matching(node -> node.resources().equals(resources)); } /** Returns the subset of nodes having storage of given type */ public NodeList storageType(NodeResources.StorageType storageType) { return matching(node -> node.resources().storageType() == storageType); } /** Returns the subset of nodes which satisfy the given resources */ public NodeList satisfies(NodeResources resources) { return matching(node -> node.resources().satisfies(resources)); } /** Returns the subset of nodes not in the given set */ public NodeList except(Set<Node> nodes) { return matching(node -> ! 
nodes.contains(node)); } /** Returns the subset of nodes excluding given node */ public NodeList except(Node node) { return except(Set.of(node)); } /** Returns the subset of nodes assigned to the given cluster type */ public NodeList type(ClusterSpec.Type type) { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().equals(type)); } /** Returns the subset of nodes that run containers */ public NodeList container() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().isContainer()); } /** Returns the subset of nodes that run a stateless service */ public NodeList stateless() { return matching(node -> node.allocation().isPresent() && ! node.allocation().get().membership().cluster().isStateful()); } /** Returns the subset of nodes that run a stateful service */ public NodeList stateful() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().isStateful()); } /** Returns the subset of nodes that are currently changing their Vespa version */ public NodeList changingVersion() { return matching(node -> node.status().vespaVersion().isPresent() && node.allocation().isPresent() && !node.status().vespaVersion().get().equals(node.allocation().get().membership().cluster().vespaVersion())); } /** Returns the subset of nodes with want to fail set to true */ public NodeList failing() { return matching(node -> node.status().wantToFail()); } /** Returns the subset of nodes that are currently changing their OS version to given version */ public NodeList changingOsVersionTo(Version version) { return matching(node -> node.status().osVersion().changingTo(version)); } /** Returns the subset of nodes that are currently changing their OS version */ public NodeList changingOsVersion() { return matching(node -> node.status().osVersion().changing()); } /** Returns a copy of this sorted by current OS version (lowest to highest) 
*/ public NodeList byIncreasingOsVersion() { return sortedBy(Comparator.comparing(node -> node.status() .osVersion() .current() .orElse(Version.emptyVersion))); } /** Returns the subset of nodes that are currently on a lower version than the given version */ public NodeList osVersionIsBefore(Version version) { return matching(node -> node.status().osVersion().isBefore(version)); } /** Returns the subset of nodes that are currently on the given OS version */ public NodeList onOsVersion(Version version) { return matching(node -> node.status().osVersion().matches(version)); } /** Returns the subset of nodes assigned to the given cluster */ public NodeList cluster(ClusterSpec.Id cluster) { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().id().equals(cluster)); } /** Returns the subset of nodes owned by the given application */ public NodeList owner(ApplicationId application) { return matching(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false)); } /** Returns the subset of nodes allocated to a tester instance */ public NodeList tester() { return matching(node -> node.allocation().isPresent() && node.allocation().get().owner().instance().isTester()); } /** Returns the subset of nodes matching any of the given node type(s) */ public NodeList nodeType(NodeType first, NodeType... 
rest) { if (rest.length == 0) { return matching(node -> node.type() == first); } EnumSet<NodeType> nodeTypes = EnumSet.of(first, rest); return matching(node -> nodeTypes.contains(node.type())); } /** Returns the subset of nodes of the host type */ public NodeList hosts() { return nodeType(NodeType.host); } /** Returns the subset of nodes that are parents */ public NodeList parents() { return matching(node -> node.parentHostname().isEmpty()); } /** Returns the child nodes of the given parent node */ public NodeList childrenOf(String hostname) { return matching(node -> node.hasParent(hostname)); } public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); } /** Returns the subset of nodes that are in any of the given state(s) */ public NodeList state(Node.State first, Node.State... rest) { if (rest.length == 0) { return matching(node -> node.state() == first); } return state(EnumSet.of(first, rest)); } /** Returns the subset of nodes that are in any of the given state(s) */ public NodeList state(Set<Node.State> nodeStates) { return matching(node -> nodeStates.contains(node.state())); } /** Returns the subset of nodes which have a record of being down */ public NodeList down() { return matching(Node::isDown); } /** Returns the subset of nodes which are being retired */ public NodeList retiring() { return matching(node -> node.status().wantToRetire() || node.status().preferToRetire()); } /** Returns the parent nodes of the given child nodes */ public NodeList parentsOf(NodeList children) { return children.stream() .map(this::parentOf) .filter(Optional::isPresent) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), NodeList::copyOf)); } /** Returns the nodes contained in the group identified by given index */ public NodeList group(int index) { return matching(n -> n.allocation().isPresent() && n.allocation().get().membership().cluster().group().equals(Optional.of(ClusterSpec.Group.from(index)))); } /** Returns the parent node 
of the given child node */ public Optional&lt;Node&gt; parentOf(Node child) { return child.parentHostname() .flatMap(parentHostname -> stream().filter(node -> node.hostname().equals(parentHostname)) .findFirst()); } /** Returns the hostnames of nodes in this */ public Set&lt;String&gt; hostnames() { return stream().map(Node::hostname).collect(Collectors.toUnmodifiableSet()); } /** Returns the stateful clusters on nodes in this */ public Set&lt;ClusterId&gt; statefulClusters() { return stream().filter(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().isStateful()) .map(node -> new ClusterId(node.allocation().get().owner(), node.allocation().get().membership().cluster().id())) .collect(Collectors.toUnmodifiableSet()); } /** * Returns the cluster spec of the nodes in this, without any group designation * * @throws IllegalStateException if there are no nodes in this list or they do not all belong * to the same cluster */ public ClusterSpec clusterSpec() { ensureSingleCluster(); if (isEmpty()) throw new IllegalStateException("No nodes"); return first().get().allocation().get().membership().cluster().with(Optional.empty()); } /** * Returns the resources of the nodes of this. * * NOTE: If the nodes do not all have the same values of node resources, a random pick among those node resources * will be returned. 
* * @throws IllegalStateException if the nodes in this do not all belong to the same cluster */ public ClusterResources toResources() { ensureSingleCluster(); if (isEmpty()) return new ClusterResources(0, 0, NodeResources.unspecified()); return new ClusterResources(size(), (int)stream().map(node -> node.allocation().get().membership().cluster().group().get()) .distinct() .count(), first().get().resources()); } /** Returns the nodes that are allocated on an exclusive network switch within its cluster */ public NodeList onExclusiveSwitch(NodeList clusterHosts) { ensureSingleCluster(); Map<String, Long> switchCount = clusterHosts.stream() .flatMap(host -> host.switchHostname().stream()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); return matching(node -> { Optional<Node> nodeOnSwitch = clusterHosts.parentOf(node); if (node.parentHostname().isPresent()) { if (nodeOnSwitch.isEmpty()) { throw new IllegalArgumentException("Parent of " + node + ", " + node.parentHostname().get() + ", not found in given cluster hosts"); } } else { nodeOnSwitch = Optional.of(node); } Optional<String> allocatedSwitch = nodeOnSwitch.flatMap(Node::switchHostname); return allocatedSwitch.isEmpty() || switchCount.get(allocatedSwitch.get()) == 1; }); } private void ensureSingleCluster() { if (isEmpty()) return; if (stream().anyMatch(node -> node.allocation().isEmpty())) throw new IllegalStateException("Some nodes are not allocated to a cluster"); ClusterSpec firstNodeSpec = first().get().allocation().get().membership().cluster().with(Optional.empty()); if (stream().map(node -> node.allocation().get().membership().cluster().with(Optional.empty())) .anyMatch(clusterSpec -> ! clusterSpec.id().equals(firstNodeSpec.id()))) throw new IllegalStateException("Nodes belong to multiple clusters"); } /** Returns the nodes of this as a stream */ public Stream<Node> stream() { return asList().stream(); } public static NodeList of(Node ... 
nodes) { return copyOf(List.of(nodes)); } public static NodeList copyOf(List<Node> nodes) { return new NodeList(nodes, false); } @Override public String toString() { return asList().toString(); } @Override public int hashCode() { return asList().hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof NodeList)) return false; return this.asList().equals(((NodeList) other).asList()); } }
class NodeList extends AbstractFilteringList<Node, NodeList> { protected NodeList(List<Node> nodes, boolean negate) { super(nodes, negate, NodeList::new); } /** Returns the node with the given hostname from this list, or empty if it is not present */ public Optional<Node> node(String hostname) { return matching(node -> node.hostname().equals(hostname)).first(); } /** Returns the subset of nodes which are retired */ public NodeList retired() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().retired()); } /** Returns the subset of nodes that are being deprovisioned */ public NodeList deprovisioning() { return matching(node -> node.status().wantToRetire() && node.status().wantToDeprovision()); } /** Returns the subset of nodes that are being rebuilt */ /** Returns the subset of nodes which are removable */ public NodeList removable() { return matching(node -> node.allocation().isPresent() && node.allocation().get().removable()); } /** Returns the subset of nodes which are reusable immediately after removal */ public NodeList reusable() { return matching(node -> node.allocation().isPresent() && node.allocation().get().reusable()); } /** Returns the subset of nodes having exactly the given resources */ public NodeList resources(NodeResources resources) { return matching(node -> node.resources().equals(resources)); } /** Returns the subset of nodes having storage of given type */ public NodeList storageType(NodeResources.StorageType storageType) { return matching(node -> node.resources().storageType() == storageType); } /** Returns the subset of nodes which satisfy the given resources */ public NodeList satisfies(NodeResources resources) { return matching(node -> node.resources().satisfies(resources)); } /** Returns the subset of nodes not in the given set */ public NodeList except(Set<Node> nodes) { return matching(node -> ! 
nodes.contains(node)); } /** Returns the subset of nodes excluding given node */ public NodeList except(Node node) { return except(Set.of(node)); } /** Returns the subset of nodes assigned to the given cluster type */ public NodeList type(ClusterSpec.Type type) { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().equals(type)); } /** Returns the subset of nodes that run containers */ public NodeList container() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().isContainer()); } /** Returns the subset of nodes that run a stateless service */ public NodeList stateless() { return matching(node -> node.allocation().isPresent() && ! node.allocation().get().membership().cluster().isStateful()); } /** Returns the subset of nodes that run a stateful service */ public NodeList stateful() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().isStateful()); } /** Returns the subset of nodes that are currently changing their Vespa version */ public NodeList changingVersion() { return matching(node -> node.status().vespaVersion().isPresent() && node.allocation().isPresent() && !node.status().vespaVersion().get().equals(node.allocation().get().membership().cluster().vespaVersion())); } /** Returns the subset of nodes with want to fail set to true */ public NodeList failing() { return matching(node -> node.status().wantToFail()); } /** Returns the subset of nodes that are currently changing their OS version to given version */ public NodeList changingOsVersionTo(Version version) { return matching(node -> node.status().osVersion().changingTo(version)); } /** Returns the subset of nodes that are currently changing their OS version */ public NodeList changingOsVersion() { return matching(node -> node.status().osVersion().changing()); } /** Returns a copy of this sorted by current OS version (lowest to highest) 
*/ public NodeList byIncreasingOsVersion() { return sortedBy(Comparator.comparing(node -> node.status() .osVersion() .current() .orElse(Version.emptyVersion))); } /** Returns the subset of nodes that are currently on a lower version than the given version */ public NodeList osVersionIsBefore(Version version) { return matching(node -> node.status().osVersion().isBefore(version)); } /** Returns the subset of nodes that are currently on the given OS version */ public NodeList onOsVersion(Version version) { return matching(node -> node.status().osVersion().matches(version)); } /** Returns the subset of nodes assigned to the given cluster */ public NodeList cluster(ClusterSpec.Id cluster) { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().id().equals(cluster)); } /** Returns the subset of nodes owned by the given application */ public NodeList owner(ApplicationId application) { return matching(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false)); } /** Returns the subset of nodes allocated to a tester instance */ public NodeList tester() { return matching(node -> node.allocation().isPresent() && node.allocation().get().owner().instance().isTester()); } /** Returns the subset of nodes matching any of the given node type(s) */ public NodeList nodeType(NodeType first, NodeType... 
rest) { if (rest.length == 0) { return matching(node -> node.type() == first); } EnumSet<NodeType> nodeTypes = EnumSet.of(first, rest); return matching(node -> nodeTypes.contains(node.type())); } /** Returns the subset of nodes of the host type */ public NodeList hosts() { return nodeType(NodeType.host); } /** Returns the subset of nodes that are parents */ public NodeList parents() { return matching(node -> node.parentHostname().isEmpty()); } /** Returns the child nodes of the given parent node */ public NodeList childrenOf(String hostname) { return matching(node -> node.hasParent(hostname)); } public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); } /** Returns the subset of nodes that are in any of the given state(s) */ public NodeList state(Node.State first, Node.State... rest) { if (rest.length == 0) { return matching(node -> node.state() == first); } return state(EnumSet.of(first, rest)); } /** Returns the subset of nodes that are in any of the given state(s) */ public NodeList state(Set<Node.State> nodeStates) { return matching(node -> nodeStates.contains(node.state())); } /** Returns the subset of nodes which have a record of being down */ public NodeList down() { return matching(Node::isDown); } /** Returns the subset of nodes which are being retired */ public NodeList retiring() { return matching(node -> node.status().wantToRetire() || node.status().preferToRetire()); } /** Returns the parent nodes of the given child nodes */ public NodeList parentsOf(NodeList children) { return children.stream() .map(this::parentOf) .filter(Optional::isPresent) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), NodeList::copyOf)); } /** Returns the nodes contained in the group identified by given index */ public NodeList group(int index) { return matching(n -> n.allocation().isPresent() && n.allocation().get().membership().cluster().group().equals(Optional.of(ClusterSpec.Group.from(index)))); } /** Returns the parent node 
of the given child node */ public Optional&lt;Node&gt; parentOf(Node child) { return child.parentHostname() .flatMap(parentHostname -> stream().filter(node -> node.hostname().equals(parentHostname)) .findFirst()); } /** Returns the hostnames of nodes in this */ public Set&lt;String&gt; hostnames() { return stream().map(Node::hostname).collect(Collectors.toUnmodifiableSet()); } /** Returns the stateful clusters on nodes in this */ public Set&lt;ClusterId&gt; statefulClusters() { return stream().filter(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().isStateful()) .map(node -> new ClusterId(node.allocation().get().owner(), node.allocation().get().membership().cluster().id())) .collect(Collectors.toUnmodifiableSet()); } /** * Returns the cluster spec of the nodes in this, without any group designation * * @throws IllegalStateException if there are no nodes in this list or they do not all belong * to the same cluster */ public ClusterSpec clusterSpec() { ensureSingleCluster(); if (isEmpty()) throw new IllegalStateException("No nodes"); return first().get().allocation().get().membership().cluster().with(Optional.empty()); } /** * Returns the resources of the nodes of this. * * NOTE: If the nodes do not all have the same values of node resources, a random pick among those node resources * will be returned. 
* * @throws IllegalStateException if the nodes in this do not all belong to the same cluster */ public ClusterResources toResources() { ensureSingleCluster(); if (isEmpty()) return new ClusterResources(0, 0, NodeResources.unspecified()); return new ClusterResources(size(), (int)stream().map(node -> node.allocation().get().membership().cluster().group().get()) .distinct() .count(), first().get().resources()); } /** Returns the nodes that are allocated on an exclusive network switch within its cluster */ public NodeList onExclusiveSwitch(NodeList clusterHosts) { ensureSingleCluster(); Map<String, Long> switchCount = clusterHosts.stream() .flatMap(host -> host.switchHostname().stream()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); return matching(node -> { Optional<Node> nodeOnSwitch = clusterHosts.parentOf(node); if (node.parentHostname().isPresent()) { if (nodeOnSwitch.isEmpty()) { throw new IllegalArgumentException("Parent of " + node + ", " + node.parentHostname().get() + ", not found in given cluster hosts"); } } else { nodeOnSwitch = Optional.of(node); } Optional<String> allocatedSwitch = nodeOnSwitch.flatMap(Node::switchHostname); return allocatedSwitch.isEmpty() || switchCount.get(allocatedSwitch.get()) == 1; }); } private void ensureSingleCluster() { if (isEmpty()) return; if (stream().anyMatch(node -> node.allocation().isEmpty())) throw new IllegalStateException("Some nodes are not allocated to a cluster"); ClusterSpec firstNodeSpec = first().get().allocation().get().membership().cluster().with(Optional.empty()); if (stream().map(node -> node.allocation().get().membership().cluster().with(Optional.empty())) .anyMatch(clusterSpec -> ! clusterSpec.id().equals(firstNodeSpec.id()))) throw new IllegalStateException("Nodes belong to multiple clusters"); } /** Returns the nodes of this as a stream */ public Stream<Node> stream() { return asList().stream(); } public static NodeList of(Node ... 
nodes) { return copyOf(List.of(nodes)); } public static NodeList copyOf(List<Node> nodes) { return new NodeList(nodes, false); } @Override public String toString() { return asList().toString(); } @Override public int hashCode() { return asList().hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof NodeList)) return false; return this.asList().equals(((NodeList) other).asList()); } }
Yes, thanks!
/**
 * Returns the subset of nodes that are being rebuilt.
 *
 * @param soft whether to return soft rebuilds (rebuilt in place, without retirement)
 *             rather than hard rebuilds (rebuilt by retiring the node)
 */
public NodeList rebuilding(boolean soft) {
    // Soft and hard rebuilds are disjoint: a node with both wantToRetire and wantToRebuild
    // is a hard rebuild, so a soft query must exclude it. The previous predicate
    // ((soft || wantToRetire) && wantToRebuild) wrongly returned hard rebuilds when soft was true.
    return matching(node -> node.status().wantToRebuild() && node.status().wantToRetire() == !soft);
}
return matching(node -> (soft || node.status().wantToRetire()) && node.status().wantToRebuild());
/**
 * Returns the subset of nodes being rebuilt: soft rebuilds are those not also retiring,
 * hard rebuilds those rebuilt by retiring.
 */
public NodeList rebuilding(boolean soft) {
    // Equivalent to: soft -> !wantToRetire && wantToRebuild; hard -> wantToRetire && wantToRebuild.
    return matching(node -> node.status().wantToRebuild()
                            && node.status().wantToRetire() != soft);
}
/** A filterable list of {@link Node}s, with convenience methods for looking up nodes and selecting common subsets. */
class NodeList extends AbstractFilteringList<Node, NodeList> { protected NodeList(List<Node> nodes, boolean negate) { super(nodes, negate, NodeList::new); } /** Returns the node with the given hostname from this list, or empty if it is not present */ public Optional<Node> node(String hostname) { return matching(node -> node.hostname().equals(hostname)).first(); } /** Returns the subset of nodes which are retired */ public NodeList retired() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().retired()); } /** Returns the subset of nodes that are being deprovisioned */ public NodeList deprovisioning() { return matching(node -> node.status().wantToRetire() && node.status().wantToDeprovision()); } /** Returns the subset of nodes which are removable */ public NodeList removable() { return matching(node -> node.allocation().isPresent() && node.allocation().get().removable()); } /** Returns the subset of nodes which are reusable immediately after removal */ public NodeList reusable() { return matching(node -> node.allocation().isPresent() && node.allocation().get().reusable()); } /** Returns the subset of nodes having exactly the given resources */ public NodeList resources(NodeResources resources) { return matching(node -> node.resources().equals(resources)); } /** Returns the subset of nodes having storage of given type */ public NodeList storageType(NodeResources.StorageType storageType) { return matching(node -> node.resources().storageType() == storageType); } /** Returns the subset of nodes which satisfy the given resources */ public NodeList satisfies(NodeResources resources) { return matching(node -> node.resources().satisfies(resources)); } /** Returns the subset of nodes not in the given set */ public NodeList except(Set<Node> nodes) { return matching(node -> ! 
nodes.contains(node)); } /** Returns the subset of nodes excluding given node */ public NodeList except(Node node) { return except(Set.of(node)); } /** Returns the subset of nodes assigned to the given cluster type */ public NodeList type(ClusterSpec.Type type) { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().equals(type)); } /** Returns the subset of nodes that run containers */ public NodeList container() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().isContainer()); } /** Returns the subset of nodes that run a stateless service */ public NodeList stateless() { return matching(node -> node.allocation().isPresent() && ! node.allocation().get().membership().cluster().isStateful()); } /** Returns the subset of nodes that run a stateful service */ public NodeList stateful() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().isStateful()); } /** Returns the subset of nodes that are currently changing their Vespa version */ public NodeList changingVersion() { return matching(node -> node.status().vespaVersion().isPresent() && node.allocation().isPresent() && !node.status().vespaVersion().get().equals(node.allocation().get().membership().cluster().vespaVersion())); } /** Returns the subset of nodes with want to fail set to true */ public NodeList failing() { return matching(node -> node.status().wantToFail()); } /** Returns the subset of nodes that are currently changing their OS version to given version */ public NodeList changingOsVersionTo(Version version) { return matching(node -> node.status().osVersion().changingTo(version)); } /** Returns the subset of nodes that are currently changing their OS version */ public NodeList changingOsVersion() { return matching(node -> node.status().osVersion().changing()); } /** Returns a copy of this sorted by current OS version (lowest to highest) 
*/ public NodeList byIncreasingOsVersion() { return sortedBy(Comparator.comparing(node -> node.status() .osVersion() .current() .orElse(Version.emptyVersion))); } /** Returns the subset of nodes that are currently on a lower version than the given version */ public NodeList osVersionIsBefore(Version version) { return matching(node -> node.status().osVersion().isBefore(version)); } /** Returns the subset of nodes that are currently on the given OS version */ public NodeList onOsVersion(Version version) { return matching(node -> node.status().osVersion().matches(version)); } /** Returns the subset of nodes assigned to the given cluster */ public NodeList cluster(ClusterSpec.Id cluster) { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().id().equals(cluster)); } /** Returns the subset of nodes owned by the given application */ public NodeList owner(ApplicationId application) { return matching(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false)); } /** Returns the subset of nodes allocated to a tester instance */ public NodeList tester() { return matching(node -> node.allocation().isPresent() && node.allocation().get().owner().instance().isTester()); } /** Returns the subset of nodes matching any of the given node type(s) */ public NodeList nodeType(NodeType first, NodeType... 
rest) { if (rest.length == 0) { return matching(node -> node.type() == first); } EnumSet<NodeType> nodeTypes = EnumSet.of(first, rest); return matching(node -> nodeTypes.contains(node.type())); } /** Returns the subset of nodes of the host type */ public NodeList hosts() { return nodeType(NodeType.host); } /** Returns the subset of nodes that are parents */ public NodeList parents() { return matching(node -> node.parentHostname().isEmpty()); } /** Returns the child nodes of the given parent node */ public NodeList childrenOf(String hostname) { return matching(node -> node.hasParent(hostname)); } public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); } /** Returns the subset of nodes that are in any of the given state(s) */ public NodeList state(Node.State first, Node.State... rest) { if (rest.length == 0) { return matching(node -> node.state() == first); } return state(EnumSet.of(first, rest)); } /** Returns the subset of nodes that are in any of the given state(s) */ public NodeList state(Set<Node.State> nodeStates) { return matching(node -> nodeStates.contains(node.state())); } /** Returns the subset of nodes which have a record of being down */ public NodeList down() { return matching(Node::isDown); } /** Returns the subset of nodes which are being retired */ public NodeList retiring() { return matching(node -> node.status().wantToRetire() || node.status().preferToRetire()); } /** Returns the parent nodes of the given child nodes */ public NodeList parentsOf(NodeList children) { return children.stream() .map(this::parentOf) .filter(Optional::isPresent) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), NodeList::copyOf)); } /** Returns the nodes contained in the group identified by given index */ public NodeList group(int index) { return matching(n -> n.allocation().isPresent() && n.allocation().get().membership().cluster().group().equals(Optional.of(ClusterSpec.Group.from(index)))); } /** Returns the parent node 
of the given child node */ public Optional<Node> parentOf(Node child) { return child.parentHostname() .flatMap(parentHostname -> stream().filter(node -> node.hostname().equals(parentHostname)) .findFirst()); } /** Returns the hostnames of nodes in this */ public Set<String> hostnames() { return stream().map(Node::hostname).collect(Collectors.toUnmodifiableSet()); } /** Returns the stateful clusters on nodes in this */ public Set<ClusterId> statefulClusters() { return stream().filter(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().isStateful()) .map(node -> new ClusterId(node.allocation().get().owner(), node.allocation().get().membership().cluster().id())) .collect(Collectors.toUnmodifiableSet()); } /** * Returns the cluster spec of the nodes in this, without any group designation * * @throws IllegalStateException if there are no nodes in this list or they do not all belong * to the same cluster */ public ClusterSpec clusterSpec() { ensureSingleCluster(); if (isEmpty()) throw new IllegalStateException("No nodes"); return first().get().allocation().get().membership().cluster().with(Optional.empty()); } /** * Returns the resources of the nodes of this. * * NOTE: If the nodes do not all have the same values of node resources, a random pick among those node resources * will be returned. 
 * * @throws IllegalStateException if the nodes in this do not all belong to the same cluster */ public ClusterResources toResources() { ensureSingleCluster(); if (isEmpty()) return new ClusterResources(0, 0, NodeResources.unspecified()); return new ClusterResources(size(), (int)stream().map(node -> node.allocation().get().membership().cluster().group().get()) .distinct() .count(), first().get().resources()); } /** Returns the nodes that are allocated on an exclusive network switch within its cluster */ public NodeList onExclusiveSwitch(NodeList clusterHosts) { ensureSingleCluster(); Map<String, Long> switchCount = clusterHosts.stream() .flatMap(host -> host.switchHostname().stream()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); return matching(node -> { Optional<Node> nodeOnSwitch = clusterHosts.parentOf(node); if (node.parentHostname().isPresent()) { if (nodeOnSwitch.isEmpty()) { throw new IllegalArgumentException("Parent of " + node + ", " + node.parentHostname().get() + ", not found in given cluster hosts"); } } else { nodeOnSwitch = Optional.of(node); } Optional<String> allocatedSwitch = nodeOnSwitch.flatMap(Node::switchHostname); return allocatedSwitch.isEmpty() || switchCount.get(allocatedSwitch.get()) == 1; }); } private void ensureSingleCluster() { if (isEmpty()) return; if (stream().anyMatch(node -> node.allocation().isEmpty())) throw new IllegalStateException("Some nodes are not allocated to a cluster"); ClusterSpec firstNodeSpec = first().get().allocation().get().membership().cluster().with(Optional.empty()); if (stream().map(node -> node.allocation().get().membership().cluster().with(Optional.empty())) .anyMatch(clusterSpec -> ! clusterSpec.id().equals(firstNodeSpec.id()))) throw new IllegalStateException("Nodes belong to multiple clusters"); } /** Returns the nodes of this as a stream */ public Stream<Node> stream() { return asList().stream(); } public static NodeList of(Node ... 
nodes) { return copyOf(List.of(nodes)); } public static NodeList copyOf(List<Node> nodes) { return new NodeList(nodes, false); } @Override public String toString() { return asList().toString(); } @Override public int hashCode() { return asList().hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof NodeList)) return false; return this.asList().equals(((NodeList) other).asList()); } }
/** A filterable list of {@link Node}s, with convenience methods for looking up nodes and selecting common subsets. */
class NodeList extends AbstractFilteringList<Node, NodeList> { protected NodeList(List<Node> nodes, boolean negate) { super(nodes, negate, NodeList::new); } /** Returns the node with the given hostname from this list, or empty if it is not present */ public Optional<Node> node(String hostname) { return matching(node -> node.hostname().equals(hostname)).first(); } /** Returns the subset of nodes which are retired */ public NodeList retired() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().retired()); } /** Returns the subset of nodes that are being deprovisioned */ public NodeList deprovisioning() { return matching(node -> node.status().wantToRetire() && node.status().wantToDeprovision()); } /** Returns the subset of nodes which are removable */ public NodeList removable() { return matching(node -> node.allocation().isPresent() && node.allocation().get().removable()); } /** Returns the subset of nodes which are reusable immediately after removal */ public NodeList reusable() { return matching(node -> node.allocation().isPresent() && node.allocation().get().reusable()); } /** Returns the subset of nodes having exactly the given resources */ public NodeList resources(NodeResources resources) { return matching(node -> node.resources().equals(resources)); } /** Returns the subset of nodes having storage of given type */ public NodeList storageType(NodeResources.StorageType storageType) { return matching(node -> node.resources().storageType() == storageType); } /** Returns the subset of nodes which satisfy the given resources */ public NodeList satisfies(NodeResources resources) { return matching(node -> node.resources().satisfies(resources)); } /** Returns the subset of nodes not in the given set */ public NodeList except(Set<Node> nodes) { return matching(node -> ! 
nodes.contains(node)); } /** Returns the subset of nodes excluding given node */ public NodeList except(Node node) { return except(Set.of(node)); } /** Returns the subset of nodes assigned to the given cluster type */ public NodeList type(ClusterSpec.Type type) { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().equals(type)); } /** Returns the subset of nodes that run containers */ public NodeList container() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().type().isContainer()); } /** Returns the subset of nodes that run a stateless service */ public NodeList stateless() { return matching(node -> node.allocation().isPresent() && ! node.allocation().get().membership().cluster().isStateful()); } /** Returns the subset of nodes that run a stateful service */ public NodeList stateful() { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().isStateful()); } /** Returns the subset of nodes that are currently changing their Vespa version */ public NodeList changingVersion() { return matching(node -> node.status().vespaVersion().isPresent() && node.allocation().isPresent() && !node.status().vespaVersion().get().equals(node.allocation().get().membership().cluster().vespaVersion())); } /** Returns the subset of nodes with want to fail set to true */ public NodeList failing() { return matching(node -> node.status().wantToFail()); } /** Returns the subset of nodes that are currently changing their OS version to given version */ public NodeList changingOsVersionTo(Version version) { return matching(node -> node.status().osVersion().changingTo(version)); } /** Returns the subset of nodes that are currently changing their OS version */ public NodeList changingOsVersion() { return matching(node -> node.status().osVersion().changing()); } /** Returns a copy of this sorted by current OS version (lowest to highest) 
*/ public NodeList byIncreasingOsVersion() { return sortedBy(Comparator.comparing(node -> node.status() .osVersion() .current() .orElse(Version.emptyVersion))); } /** Returns the subset of nodes that are currently on a lower version than the given version */ public NodeList osVersionIsBefore(Version version) { return matching(node -> node.status().osVersion().isBefore(version)); } /** Returns the subset of nodes that are currently on the given OS version */ public NodeList onOsVersion(Version version) { return matching(node -> node.status().osVersion().matches(version)); } /** Returns the subset of nodes assigned to the given cluster */ public NodeList cluster(ClusterSpec.Id cluster) { return matching(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().id().equals(cluster)); } /** Returns the subset of nodes owned by the given application */ public NodeList owner(ApplicationId application) { return matching(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false)); } /** Returns the subset of nodes allocated to a tester instance */ public NodeList tester() { return matching(node -> node.allocation().isPresent() && node.allocation().get().owner().instance().isTester()); } /** Returns the subset of nodes matching any of the given node type(s) */ public NodeList nodeType(NodeType first, NodeType... 
rest) { if (rest.length == 0) { return matching(node -> node.type() == first); } EnumSet<NodeType> nodeTypes = EnumSet.of(first, rest); return matching(node -> nodeTypes.contains(node.type())); } /** Returns the subset of nodes of the host type */ public NodeList hosts() { return nodeType(NodeType.host); } /** Returns the subset of nodes that are parents */ public NodeList parents() { return matching(node -> node.parentHostname().isEmpty()); } /** Returns the child nodes of the given parent node */ public NodeList childrenOf(String hostname) { return matching(node -> node.hasParent(hostname)); } public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); } /** Returns the subset of nodes that are in any of the given state(s) */ public NodeList state(Node.State first, Node.State... rest) { if (rest.length == 0) { return matching(node -> node.state() == first); } return state(EnumSet.of(first, rest)); } /** Returns the subset of nodes that are in any of the given state(s) */ public NodeList state(Set<Node.State> nodeStates) { return matching(node -> nodeStates.contains(node.state())); } /** Returns the subset of nodes which have a record of being down */ public NodeList down() { return matching(Node::isDown); } /** Returns the subset of nodes which are being retired */ public NodeList retiring() { return matching(node -> node.status().wantToRetire() || node.status().preferToRetire()); } /** Returns the parent nodes of the given child nodes */ public NodeList parentsOf(NodeList children) { return children.stream() .map(this::parentOf) .filter(Optional::isPresent) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), NodeList::copyOf)); } /** Returns the nodes contained in the group identified by given index */ public NodeList group(int index) { return matching(n -> n.allocation().isPresent() && n.allocation().get().membership().cluster().group().equals(Optional.of(ClusterSpec.Group.from(index)))); } /** Returns the parent node 
of the given child node */ public Optional<Node> parentOf(Node child) { return child.parentHostname() .flatMap(parentHostname -> stream().filter(node -> node.hostname().equals(parentHostname)) .findFirst()); } /** Returns the hostnames of nodes in this */ public Set<String> hostnames() { return stream().map(Node::hostname).collect(Collectors.toUnmodifiableSet()); } /** Returns the stateful clusters on nodes in this */ public Set<ClusterId> statefulClusters() { return stream().filter(node -> node.allocation().isPresent() && node.allocation().get().membership().cluster().isStateful()) .map(node -> new ClusterId(node.allocation().get().owner(), node.allocation().get().membership().cluster().id())) .collect(Collectors.toUnmodifiableSet()); } /** * Returns the cluster spec of the nodes in this, without any group designation * * @throws IllegalStateException if there are no nodes in this list or they do not all belong * to the same cluster */ public ClusterSpec clusterSpec() { ensureSingleCluster(); if (isEmpty()) throw new IllegalStateException("No nodes"); return first().get().allocation().get().membership().cluster().with(Optional.empty()); } /** * Returns the resources of the nodes of this. * * NOTE: If the nodes do not all have the same values of node resources, a random pick among those node resources * will be returned. 
 * * @throws IllegalStateException if the nodes in this do not all belong to the same cluster */ public ClusterResources toResources() { ensureSingleCluster(); if (isEmpty()) return new ClusterResources(0, 0, NodeResources.unspecified()); return new ClusterResources(size(), (int)stream().map(node -> node.allocation().get().membership().cluster().group().get()) .distinct() .count(), first().get().resources()); } /** Returns the nodes that are allocated on an exclusive network switch within its cluster */ public NodeList onExclusiveSwitch(NodeList clusterHosts) { ensureSingleCluster(); Map<String, Long> switchCount = clusterHosts.stream() .flatMap(host -> host.switchHostname().stream()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); return matching(node -> { Optional<Node> nodeOnSwitch = clusterHosts.parentOf(node); if (node.parentHostname().isPresent()) { if (nodeOnSwitch.isEmpty()) { throw new IllegalArgumentException("Parent of " + node + ", " + node.parentHostname().get() + ", not found in given cluster hosts"); } } else { nodeOnSwitch = Optional.of(node); } Optional<String> allocatedSwitch = nodeOnSwitch.flatMap(Node::switchHostname); return allocatedSwitch.isEmpty() || switchCount.get(allocatedSwitch.get()) == 1; }); } private void ensureSingleCluster() { if (isEmpty()) return; if (stream().anyMatch(node -> node.allocation().isEmpty())) throw new IllegalStateException("Some nodes are not allocated to a cluster"); ClusterSpec firstNodeSpec = first().get().allocation().get().membership().cluster().with(Optional.empty()); if (stream().map(node -> node.allocation().get().membership().cluster().with(Optional.empty())) .anyMatch(clusterSpec -> ! clusterSpec.id().equals(firstNodeSpec.id()))) throw new IllegalStateException("Nodes belong to multiple clusters"); } /** Returns the nodes of this as a stream */ public Stream<Node> stream() { return asList().stream(); } public static NodeList of(Node ... 
nodes) { return copyOf(List.of(nodes)); } public static NodeList copyOf(List<Node> nodes) { return new NodeList(nodes, false); } @Override public String toString() { return asList().toString(); } @Override public int hashCode() { return asList().hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof NodeList)) return false; return this.asList().equals(((NodeList) other).asList()); } }
```suggestion && ! dependent.map(JobId::type).map(status::findCloud).map(List.of(CloudName.AWS, CloudName.GCP)::contains).orElse(true) ```
/**
 * Returns the time until which this job is considered to be cooling down after consecutive
 * failures on the given change, or empty if it is not cooling down: not yet both triggered
 * and completed, the first failure has not ended, or the next run would target different
 * versions than the failing one.
 */
public Optional<Instant> coolingDownUntil(Change change, Optional<JobId> dependent) {
    if (job.lastTriggered().isEmpty()) return Optional.empty();
    if (job.lastCompleted().isEmpty()) return Optional.empty();
    if (job.firstFailing().isEmpty() || ! job.firstFailing().get().hasEnded()) return Optional.empty();
    Versions lastVersions = job.lastCompleted().get().versions();
    Versions toRun = Versions.from(change, status.application, dependent.flatMap(status::deploymentFor), status.fallbackPlatform(change, job.id()));
    // Only cool down when we would retry the exact same targets as the failing run.
    if ( ! toRun.targetsMatch(lastVersions)) return Optional.empty();
    // Node allocation failures in test jobs whose dependent runs in the default cloud are
    // retried without any cool-down.
    if ( job.id().type().environment().isTest()
         && dependent.map(JobId::type).map(status::findCloud).map(CloudName.DEFAULT::equals).orElse(false)
         && job.isNodeAllocationFailure()) return Optional.empty();
    Instant firstFailing = job.firstFailing().get().end().get();
    Instant lastCompleted = job.lastCompleted().get().end().get();
    // On the very first failure (first failing run is also the last completed one), the
    // cool-down ends at that completion time; after repeated failures, it lasts 10 minutes
    // plus half the failing period, and is only reported while still in the future.
    return firstFailing.equals(lastCompleted) ? Optional.of(lastCompleted)
                                              : Optional.of(lastCompleted.plus(Duration.ofMinutes(10))
                                                                         .plus(Duration.between(firstFailing, lastCompleted)
                                                                                       .dividedBy(2)))
                                                        .filter(status.now::isBefore);
}
&& dependent.map(JobId::type).map(status::findCloud).map(CloudName.DEFAULT::equals).orElse(false)
/** The time until which this step is cooling down due to consecutive failures; never present for this step. */
public Optional<Instant> coolingDownUntil(Change change, Optional<JobId> dependent) {
    return Optional.empty();
}
/** Dependency-ordered status of one step of a deployment process, exposing when it is ready, blocked, paused or complete. */
class StepStatus { private final StepType type; private final DeploymentSpec.Step step; private final List<StepStatus> dependencies; private final InstanceName instance; private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) { this.type = requireNonNull(type); this.step = requireNonNull(step); this.dependencies = List.copyOf(dependencies); this.instance = instance; } /** The type of step this is. */ public final StepType type() { return type; } /** The step defining this. */ public final DeploymentSpec.Step step() { return step; } /** The list of steps that need to be complete before this may start. */ public final List<StepStatus> dependencies() { return dependencies; } /** The instance of this. */ public final InstanceName instance() { return instance; } /** The id of the job this corresponds to, if any. */ public Optional<JobId> job() { return Optional.empty(); } /** The time at which this is, or was, complete on the given change and / or versions. */ public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); } /** The time at which this is, or was, complete on the given change and / or versions. */ abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent); /** The time at which this step is ready to run the specified change and / or versions. */ public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); } /** The time at which this step is ready to run the specified change and / or versions: when all dependencies are complete, and any block window, pause or cool-down has passed. */ Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { return dependenciesCompletedAt(change, dependent) .map(ready -> Stream.of(blockedUntil(change), pausedUntil(), coolingDownUntil(change, dependent)) .flatMap(Optional::stream) .reduce(ready, maxBy(naturalOrder()))); } /** The time at which all dependencies completed on the given change and / or versions; empty if any dependency is incomplete. 
*/ Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) { Instant latest = Instant.EPOCH; for (StepStatus step : dependencies) { Optional<Instant> completedAt = step.completedAt(change, dependent); if (completedAt.isEmpty()) return Optional.empty(); latest = latest.isBefore(completedAt.get()) ? completedAt.get() : latest; } return Optional.of(latest); } /** The time until which this step is blocked by a change blocker. */ public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); } /** The time until which this step is paused by user intervention. */ public Optional<Instant> pausedUntil() { return Optional.empty(); } /** Whether this step is declared in the deployment spec, or is an implicit step. */ public boolean isDeclared() { return true; } }
/** Dependency-ordered status of one step of a deployment process, exposing when it is ready, blocked, paused or complete. */
class StepStatus { private final StepType type; private final DeploymentSpec.Step step; private final List<StepStatus> dependencies; private final InstanceName instance; private StepStatus(StepType type, DeploymentSpec.Step step, List<StepStatus> dependencies, InstanceName instance) { this.type = requireNonNull(type); this.step = requireNonNull(step); this.dependencies = List.copyOf(dependencies); this.instance = instance; } /** The type of step this is. */ public final StepType type() { return type; } /** The step defining this. */ public final DeploymentSpec.Step step() { return step; } /** The list of steps that need to be complete before this may start. */ public final List<StepStatus> dependencies() { return dependencies; } /** The instance of this. */ public final InstanceName instance() { return instance; } /** The id of the job this corresponds to, if any. */ public Optional<JobId> job() { return Optional.empty(); } /** The time at which this is, or was, complete on the given change and / or versions. */ public Optional<Instant> completedAt(Change change) { return completedAt(change, Optional.empty()); } /** The time at which this is, or was, complete on the given change and / or versions. */ abstract Optional<Instant> completedAt(Change change, Optional<JobId> dependent); /** The time at which this step is ready to run the specified change and / or versions. */ public Optional<Instant> readyAt(Change change) { return readyAt(change, Optional.empty()); } /** The time at which this step is ready to run the specified change and / or versions: when all dependencies are complete, and any block window, pause or cool-down has passed. */ Optional<Instant> readyAt(Change change, Optional<JobId> dependent) { return dependenciesCompletedAt(change, dependent) .map(ready -> Stream.of(blockedUntil(change), pausedUntil(), coolingDownUntil(change, dependent)) .flatMap(Optional::stream) .reduce(ready, maxBy(naturalOrder()))); } /** The time at which all dependencies completed on the given change and / or versions; empty if any dependency is incomplete. 
*/ Optional<Instant> dependenciesCompletedAt(Change change, Optional<JobId> dependent) { Instant latest = Instant.EPOCH; for (StepStatus step : dependencies) { Optional<Instant> completedAt = step.completedAt(change, dependent); if (completedAt.isEmpty()) return Optional.empty(); latest = latest.isBefore(completedAt.get()) ? completedAt.get() : latest; } return Optional.of(latest); } /** The time until which this step is blocked by a change blocker. */ public Optional<Instant> blockedUntil(Change change) { return Optional.empty(); } /** The time until which this step is paused by user intervention. */ public Optional<Instant> pausedUntil() { return Optional.empty(); } /** Whether this step is declared in the deployment spec, or is an implicit step. */ public boolean isDeclared() { return true; } }
Intentionally left undone?
/**
 * Reads all field path update operations nested under the given field path from the buffer
 * and adds them to the given document update.
 *
 * @param update    the document update to add the parsed field path updates to
 * @param buffer    token buffer positioned at the field path entry
 * @param fieldPath the field path the operations apply to
 * @return always true (fully applied): unsupported operation types throw rather than being skipped
 * @throws IllegalArgumentException if an unsupported field path update type is encountered
 */
private boolean addFieldPathUpdates(DocumentUpdate update, TokenBuffer buffer, String fieldPath) {
    int localNesting = buffer.nesting();
    buffer.next();
    // Consume operations until we return to the nesting level we started at.
    while (localNesting <= buffer.nesting()) {
        // NOTE(review): locale-sensitive lower-casing; toLowerCase(Locale.ROOT) would be robust
        // against e.g. the Turkish dotless-i — confirm whether the default locale can matter here.
        String fieldPathOperation = buffer.currentName().toLowerCase();
        FieldPathUpdate fieldPathUpdate;
        if (fieldPathOperation.equals(UPDATE_ASSIGN)) {
            fieldPathUpdate = readAssignFieldPathUpdate(update.getType(), fieldPath, buffer);
        } else if (fieldPathOperation.equals(UPDATE_ADD)) {
            fieldPathUpdate = readAddFieldPathUpdate(update.getType(), fieldPath, buffer);
        } else if (fieldPathOperation.equals(UPDATE_REMOVE)) {
            fieldPathUpdate = readRemoveFieldPathUpdate(update.getType(), fieldPath, buffer);
        } else if (SingleValueReader.UPDATE_OPERATION_TO_ARITHMETIC_SIGN.containsKey(fieldPathOperation)) {
            fieldPathUpdate = readArithmeticFieldPathUpdate(update.getType(), fieldPath, buffer, fieldPathOperation);
        } else {
            throw new IllegalArgumentException("Field path update type '" + fieldPathOperation + "' not supported.");
        }
        update.addFieldPathUpdate(fieldPathUpdate);
        buffer.next();
    }
    // Field path updates are applied in full or not at all, so this always reports fully applied.
    return true;
}
return true;
/**
 * Reads all field path update operations nested under the given field path from the buffer
 * and adds them to the given document update.
 *
 * @param update    the document update to add the parsed field path updates to
 * @param buffer    token buffer positioned at the field path entry
 * @param fieldPath the field path the operations apply to
 * @return always true (fully applied): unsupported operation types throw rather than being skipped
 * @throws IllegalArgumentException if an unsupported field path update type is encountered
 */
private boolean addFieldPathUpdates(DocumentUpdate update, TokenBuffer buffer, String fieldPath) {
    int localNesting = buffer.nesting();
    buffer.next();
    // Consume operations until we return to the nesting level we started at.
    while (localNesting <= buffer.nesting()) {
        // NOTE(review): locale-sensitive lower-casing; toLowerCase(Locale.ROOT) would be robust
        // against e.g. the Turkish dotless-i — confirm whether the default locale can matter here.
        String fieldPathOperation = buffer.currentName().toLowerCase();
        FieldPathUpdate fieldPathUpdate;
        if (fieldPathOperation.equals(UPDATE_ASSIGN)) {
            fieldPathUpdate = readAssignFieldPathUpdate(update.getType(), fieldPath, buffer);
        } else if (fieldPathOperation.equals(UPDATE_ADD)) {
            fieldPathUpdate = readAddFieldPathUpdate(update.getType(), fieldPath, buffer);
        } else if (fieldPathOperation.equals(UPDATE_REMOVE)) {
            fieldPathUpdate = readRemoveFieldPathUpdate(update.getType(), fieldPath, buffer);
        } else if (SingleValueReader.UPDATE_OPERATION_TO_ARITHMETIC_SIGN.containsKey(fieldPathOperation)) {
            fieldPathUpdate = readArithmeticFieldPathUpdate(update.getType(), fieldPath, buffer, fieldPathOperation);
        } else {
            throw new IllegalArgumentException("Field path update type '" + fieldPathOperation + "' not supported.");
        }
        update.addFieldPathUpdate(fieldPathUpdate);
        buffer.next();
    }
    // Field path updates are applied in full or not at all, so this always reports fully applied.
    return true;
}
class VespaJsonDocumentReader { private static final String UPDATE_REMOVE = "remove"; private static final String UPDATE_ADD = "add"; private final boolean ignoreUndefinedFields; public VespaJsonDocumentReader(boolean ignoreUndefinedFields) { this.ignoreUndefinedFields = ignoreUndefinedFields; } public ParsedDocumentOperation createDocumentOperation(DocumentType documentType, DocumentParseInfo documentParseInfo) { final DocumentOperation documentOperation; boolean fullyApplied = true; try { switch (documentParseInfo.operationType) { case PUT -> { documentOperation = new DocumentPut(new Document(documentType, documentParseInfo.documentId)); fullyApplied = readPut(documentParseInfo.fieldsBuffer, (DocumentPut) documentOperation); verifyEndState(documentParseInfo.fieldsBuffer, JsonToken.END_OBJECT); } case REMOVE -> documentOperation = new DocumentRemove(documentParseInfo.documentId); case UPDATE -> { documentOperation = new DocumentUpdate(documentType, documentParseInfo.documentId); fullyApplied = readUpdate(documentParseInfo.fieldsBuffer, (DocumentUpdate) documentOperation); verifyEndState(documentParseInfo.fieldsBuffer, JsonToken.END_OBJECT); } default -> throw new IllegalStateException("Implementation out of sync with itself. This is a bug."); } } catch (JsonReaderException e) { throw JsonReaderException.addDocId(e, documentParseInfo.documentId); } if (documentParseInfo.create.isPresent()) { if (! 
(documentOperation instanceof DocumentUpdate update)) { throw new IllegalArgumentException("Could not set create flag on non update operation."); } update.setCreateIfNonExistent(documentParseInfo.create.get()); } return new ParsedDocumentOperation(documentOperation, fullyApplied); } public boolean readPut(TokenBuffer buffer, DocumentPut put) { try { if (buffer.isEmpty()) throw new IllegalArgumentException(put + " is missing a 'fields' map"); return populateComposite(buffer, put.getDocument(), ignoreUndefinedFields); } catch (JsonReaderException e) { throw JsonReaderException.addDocId(e, put.getId()); } } public boolean readUpdate(TokenBuffer buffer, DocumentUpdate update) { if (buffer.isEmpty()) throw new IllegalArgumentException("Update of document " + update.getId() + " is missing a 'fields' map"); expectObjectStart(buffer.currentToken()); int localNesting = buffer.nesting(); buffer.next(); boolean fullyApplied = true; while (localNesting <= buffer.nesting()) { expectObjectStart(buffer.currentToken()); String fieldName = buffer.currentName(); try { if (isFieldPath(fieldName)) { fullyApplied &= addFieldPathUpdates(update, buffer, fieldName); } else { fullyApplied &= addFieldUpdates(update, buffer, fieldName); } expectObjectEnd(buffer.currentToken()); } catch (IllegalArgumentException | IndexOutOfBoundsException e) { throw new IllegalArgumentException("Error in '" + fieldName + "'", e); } buffer.next(); } return fullyApplied; } private boolean addFieldUpdates(DocumentUpdate update, TokenBuffer buffer, String fieldName) { Field field = update.getType().getField(fieldName); if (field == null) { if (! 
ignoreUndefinedFields) throw new IllegalArgumentException("No field named '" + fieldName + "' in " + update.getType()); buffer.skipToRelativeNesting(-1); return false; } int localNesting = buffer.nesting(); FieldUpdate fieldUpdate = FieldUpdate.create(field); buffer.next(); while (localNesting <= buffer.nesting()) { switch (buffer.currentName()) { case UPDATE_REMOVE: if (isTensorField(field)) { fieldUpdate.addValueUpdate(createTensorRemoveUpdate(buffer, field)); } else { createRemoves(buffer, field, fieldUpdate, ignoreUndefinedFields); } break; case UPDATE_ADD: if (isTensorField(field)) { fieldUpdate.addValueUpdate(createTensorAddUpdate(buffer, field)); } else { createAdds(buffer, field, fieldUpdate, ignoreUndefinedFields); } break; case UPDATE_MATCH: fieldUpdate.addValueUpdate(createMapUpdate(buffer, field, ignoreUndefinedFields)); break; case UPDATE_MODIFY: fieldUpdate.addValueUpdate(createModifyUpdate(buffer, field)); break; default: String action = buffer.currentName(); fieldUpdate.addValueUpdate(readSingleUpdate(buffer, field.getDataType(), action, ignoreUndefinedFields)); } buffer.next(); } update.addFieldUpdate(fieldUpdate); return true; } private AssignFieldPathUpdate readAssignFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer) { AssignFieldPathUpdate fieldPathUpdate = new AssignFieldPathUpdate(documentType, fieldPath); FieldValue fv = SingleValueReader.readSingleValue(buffer, fieldPathUpdate.getFieldPath().getResultingDataType(), ignoreUndefinedFields); fieldPathUpdate.setNewValue(fv); return fieldPathUpdate; } private AddFieldPathUpdate readAddFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer) { AddFieldPathUpdate fieldPathUpdate = new AddFieldPathUpdate(documentType, fieldPath); FieldValue fv = SingleValueReader.readSingleValue(buffer, fieldPathUpdate.getFieldPath().getResultingDataType(), ignoreUndefinedFields); fieldPathUpdate.setNewValues((Array) fv); return fieldPathUpdate; } private 
RemoveFieldPathUpdate readRemoveFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer) { expectScalarValue(buffer.currentToken()); return new RemoveFieldPathUpdate(documentType, fieldPath); } private AssignFieldPathUpdate readArithmeticFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer, String fieldPathOperation) { AssignFieldPathUpdate fieldPathUpdate = new AssignFieldPathUpdate(documentType, fieldPath); String arithmeticSign = SingleValueReader.UPDATE_OPERATION_TO_ARITHMETIC_SIGN.get(fieldPathOperation); double value = Double.valueOf(buffer.currentText()); String expression = String.format("$value %s %s", arithmeticSign, value); fieldPathUpdate.setExpression(expression); return fieldPathUpdate; } private static boolean isFieldPath(String field) { return field.matches("^.*?[.\\[\\{].*$"); } private static void verifyEndState(TokenBuffer buffer, JsonToken expectedFinalToken) { Preconditions.checkState(buffer.currentToken() == expectedFinalToken, "Expected end of JSON struct (%s), got %s", expectedFinalToken, buffer.currentToken()); Preconditions.checkState(buffer.nesting() == 0, "Nesting not zero at end of operation"); Preconditions.checkState(buffer.next() == null, "Dangling data at end of operation"); Preconditions.checkState(buffer.size() == 0, "Dangling data at end of operation"); } }
class VespaJsonDocumentReader { private static final String UPDATE_REMOVE = "remove"; private static final String UPDATE_ADD = "add"; private final boolean ignoreUndefinedFields; public VespaJsonDocumentReader(boolean ignoreUndefinedFields) { this.ignoreUndefinedFields = ignoreUndefinedFields; } public ParsedDocumentOperation createDocumentOperation(DocumentType documentType, DocumentParseInfo documentParseInfo) { final DocumentOperation documentOperation; boolean fullyApplied = true; try { switch (documentParseInfo.operationType) { case PUT -> { documentOperation = new DocumentPut(new Document(documentType, documentParseInfo.documentId)); fullyApplied = readPut(documentParseInfo.fieldsBuffer, (DocumentPut) documentOperation); verifyEndState(documentParseInfo.fieldsBuffer, JsonToken.END_OBJECT); } case REMOVE -> documentOperation = new DocumentRemove(documentParseInfo.documentId); case UPDATE -> { documentOperation = new DocumentUpdate(documentType, documentParseInfo.documentId); fullyApplied = readUpdate(documentParseInfo.fieldsBuffer, (DocumentUpdate) documentOperation); verifyEndState(documentParseInfo.fieldsBuffer, JsonToken.END_OBJECT); } default -> throw new IllegalStateException("Implementation out of sync with itself. This is a bug."); } } catch (JsonReaderException e) { throw JsonReaderException.addDocId(e, documentParseInfo.documentId); } if (documentParseInfo.create.isPresent()) { if (! 
(documentOperation instanceof DocumentUpdate update)) { throw new IllegalArgumentException("Could not set create flag on non update operation."); } update.setCreateIfNonExistent(documentParseInfo.create.get()); } return new ParsedDocumentOperation(documentOperation, fullyApplied); } public boolean readPut(TokenBuffer buffer, DocumentPut put) { try { if (buffer.isEmpty()) throw new IllegalArgumentException(put + " is missing a 'fields' map"); return populateComposite(buffer, put.getDocument(), ignoreUndefinedFields); } catch (JsonReaderException e) { throw JsonReaderException.addDocId(e, put.getId()); } } public boolean readUpdate(TokenBuffer buffer, DocumentUpdate update) { if (buffer.isEmpty()) throw new IllegalArgumentException("Update of document " + update.getId() + " is missing a 'fields' map"); expectObjectStart(buffer.currentToken()); int localNesting = buffer.nesting(); buffer.next(); boolean fullyApplied = true; while (localNesting <= buffer.nesting()) { expectObjectStart(buffer.currentToken()); String fieldName = buffer.currentName(); try { if (isFieldPath(fieldName)) { fullyApplied &= addFieldPathUpdates(update, buffer, fieldName); } else { fullyApplied &= addFieldUpdates(update, buffer, fieldName); } expectObjectEnd(buffer.currentToken()); } catch (IllegalArgumentException | IndexOutOfBoundsException e) { throw new IllegalArgumentException("Error in '" + fieldName + "'", e); } buffer.next(); } return fullyApplied; } private boolean addFieldUpdates(DocumentUpdate update, TokenBuffer buffer, String fieldName) { Field field = update.getType().getField(fieldName); if (field == null) { if (! 
ignoreUndefinedFields) throw new IllegalArgumentException("No field named '" + fieldName + "' in " + update.getType()); buffer.skipToRelativeNesting(-1); return false; } int localNesting = buffer.nesting(); FieldUpdate fieldUpdate = FieldUpdate.create(field); buffer.next(); while (localNesting <= buffer.nesting()) { switch (buffer.currentName()) { case UPDATE_REMOVE: if (isTensorField(field)) { fieldUpdate.addValueUpdate(createTensorRemoveUpdate(buffer, field)); } else { createRemoves(buffer, field, fieldUpdate, ignoreUndefinedFields); } break; case UPDATE_ADD: if (isTensorField(field)) { fieldUpdate.addValueUpdate(createTensorAddUpdate(buffer, field)); } else { createAdds(buffer, field, fieldUpdate, ignoreUndefinedFields); } break; case UPDATE_MATCH: fieldUpdate.addValueUpdate(createMapUpdate(buffer, field, ignoreUndefinedFields)); break; case UPDATE_MODIFY: fieldUpdate.addValueUpdate(createModifyUpdate(buffer, field)); break; default: String action = buffer.currentName(); fieldUpdate.addValueUpdate(readSingleUpdate(buffer, field.getDataType(), action, ignoreUndefinedFields)); } buffer.next(); } update.addFieldUpdate(fieldUpdate); return true; } private AssignFieldPathUpdate readAssignFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer) { AssignFieldPathUpdate fieldPathUpdate = new AssignFieldPathUpdate(documentType, fieldPath); FieldValue fv = SingleValueReader.readSingleValue(buffer, fieldPathUpdate.getFieldPath().getResultingDataType(), ignoreUndefinedFields); fieldPathUpdate.setNewValue(fv); return fieldPathUpdate; } private AddFieldPathUpdate readAddFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer) { AddFieldPathUpdate fieldPathUpdate = new AddFieldPathUpdate(documentType, fieldPath); FieldValue fv = SingleValueReader.readSingleValue(buffer, fieldPathUpdate.getFieldPath().getResultingDataType(), ignoreUndefinedFields); fieldPathUpdate.setNewValues((Array) fv); return fieldPathUpdate; } private 
RemoveFieldPathUpdate readRemoveFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer) { expectScalarValue(buffer.currentToken()); return new RemoveFieldPathUpdate(documentType, fieldPath); } private AssignFieldPathUpdate readArithmeticFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer, String fieldPathOperation) { AssignFieldPathUpdate fieldPathUpdate = new AssignFieldPathUpdate(documentType, fieldPath); String arithmeticSign = SingleValueReader.UPDATE_OPERATION_TO_ARITHMETIC_SIGN.get(fieldPathOperation); double value = Double.valueOf(buffer.currentText()); String expression = String.format("$value %s %s", arithmeticSign, value); fieldPathUpdate.setExpression(expression); return fieldPathUpdate; } private static boolean isFieldPath(String field) { return field.matches("^.*?[.\\[\\{].*$"); } private static void verifyEndState(TokenBuffer buffer, JsonToken expectedFinalToken) { Preconditions.checkState(buffer.currentToken() == expectedFinalToken, "Expected end of JSON struct (%s), got %s", expectedFinalToken, buffer.currentToken()); Preconditions.checkState(buffer.nesting() == 0, "Nesting not zero at end of operation"); Preconditions.checkState(buffer.next() == null, "Dangling data at end of operation"); Preconditions.checkState(buffer.size() == 0, "Dangling data at end of operation"); } }
Intentionally commented out?
public void nonExistingFieldsCanBeIgnoredInUpdate() throws IOException{ JsonReader r = createReader(inputJson( "{ ", " 'update': 'id:unittest:smoke::doc1',", " 'fields': {", " 'nonexisting1': { 'assign': 'ignored value' },", " 'field1': { 'assign': 'value1' },", " 'field2': { 'assign': 'value2' }", " }", "}")); DocumentParseInfo parseInfo = r.parseDocument().get(); DocumentType docType = r.readDocumentType(parseInfo.documentId); DocumentUpdate update = new DocumentUpdate(docType, parseInfo.documentId); boolean fullyApplied = new VespaJsonDocumentReader(true).readUpdate(parseInfo.fieldsBuffer, update); assertFalse(fullyApplied); assertNull(update.getFieldUpdate("nonexisting1")); assertEquals("value1", update.getFieldUpdate("field1").getValueUpdates().get(0).getValue().getWrappedValue().toString()); assertNull(update.getFieldUpdate("nonexisting2")); assertEquals("value2", update.getFieldUpdate("field2").getValueUpdates().get(0).getValue().getWrappedValue().toString()); assertNull(update.getFieldUpdate("nonexisting3")); }
public void nonExistingFieldsCanBeIgnoredInUpdate() throws IOException{ JsonReader r = createReader(inputJson( "{ ", " 'update': 'id:unittest:smoke::doc1',", " 'fields': {", " 'nonexisting1': { 'assign': 'ignored value' },", " 'field1': { 'assign': 'value1' },", " 'nonexisting2': { " + " 'assign': {", " 'blocks': {", " 'a':[2.0,3.0],", " 'b':[4.0,5.0]", " }", " }", " },", " 'field2': { 'assign': 'value2' },", " 'nonexisting3': { 'assign': 'ignored value' }", " }", "}")); DocumentParseInfo parseInfo = r.parseDocument().get(); DocumentType docType = r.readDocumentType(parseInfo.documentId); DocumentUpdate update = new DocumentUpdate(docType, parseInfo.documentId); boolean fullyApplied = new VespaJsonDocumentReader(true).readUpdate(parseInfo.fieldsBuffer, update); assertFalse(fullyApplied); assertNull(update.getFieldUpdate("nonexisting1")); assertEquals("value1", update.getFieldUpdate("field1").getValueUpdates().get(0).getValue().getWrappedValue().toString()); assertNull(update.getFieldUpdate("nonexisting2")); assertEquals("value2", update.getFieldUpdate("field2").getValueUpdates().get(0).getValue().getWrappedValue().toString()); assertNull(update.getFieldUpdate("nonexisting3")); }
class JsonReaderTestCase { private DocumentTypeManager types; private JsonFactory parserFactory; @Before public void setUp() throws Exception { parserFactory = new JsonFactory(); types = new DocumentTypeManager(); { DocumentType x = new DocumentType("smoke"); x.addField(new Field("something", DataType.STRING)); x.addField(new Field("nalle", DataType.STRING)); x.addField(new Field("field1", DataType.STRING)); x.addField(new Field("field2", DataType.STRING)); x.addField(new Field("int1", DataType.INT)); x.addField(new Field("flag", DataType.BOOL)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("mirrors"); StructDataType woo = new StructDataType("woo"); woo.addField(new Field("sandra", DataType.STRING)); woo.addField(new Field("cloud", DataType.STRING)); x.addField(new Field("skuggsjaa", woo)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testarray"); DataType d = new ArrayDataType(DataType.STRING); x.addField(new Field("actualarray", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testset"); DataType d = new WeightedSetDataType(DataType.STRING, true, true); x.addField(new Field("actualset", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testmap"); DataType d = new MapDataType(DataType.STRING, DataType.STRING); x.addField(new Field("actualmap", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testraw"); DataType d = DataType.RAW; x.addField(new Field("actualraw", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testMapStringToArrayOfInt"); DataType value = new ArrayDataType(DataType.INT); DataType d = new MapDataType(DataType.STRING, value); x.addField(new Field("actualMapStringToArrayOfInt", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testsinglepos"); DataType d = PositionDataType.INSTANCE; x.addField(new Field("singlepos", d)); x.addField(new Field("geopos", new GeoPosType(8))); 
types.registerDocumentType(x); } { DocumentType x = new DocumentType("testtensor"); x.addField(new Field("sparse_single_dimension_tensor", new TensorDataType(new TensorType.Builder().mapped("x").build()))); x.addField(new Field("sparse_tensor", new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").build()))); x.addField(new Field("dense_tensor", new TensorDataType(new TensorType.Builder().indexed("x", 2).indexed("y", 3).build()))); x.addField(new Field("dense_int8_tensor", new TensorDataType(TensorType.fromSpec("tensor<int8>(x[2],y[3])")))); x.addField(new Field("dense_unbound_tensor", new TensorDataType(new TensorType.Builder().indexed("x").indexed("y").build()))); x.addField(new Field("mixed_tensor", new TensorDataType(new TensorType.Builder().mapped("x").indexed("y", 3).build()))); x.addField(new Field("mixed_bfloat16_tensor", new TensorDataType(TensorType.fromSpec("tensor<bfloat16>(x{},y[3])")))); x.addField(new Field("mixed_tensor_adv", new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").mapped("z").indexed("a", 3).build()))); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testpredicate"); x.addField(new Field("boolean", DataType.PREDICATE)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testint"); x.addField(new Field("integerfield", DataType.INT)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testnull"); x.addField(new Field("intfield", DataType.INT)); x.addField(new Field("stringfield", DataType.STRING)); x.addField(new Field("arrayfield", new ArrayDataType(DataType.STRING))); x.addField(new Field("weightedsetfield", new WeightedSetDataType(DataType.STRING, true, true))); x.addField(new Field("mapfield", new MapDataType(DataType.STRING, DataType.STRING))); x.addField(new Field("tensorfield", new TensorDataType(new TensorType.Builder().indexed("x").build()))); types.registerDocumentType(x); } } @After public void tearDown() throws Exception { types = null; 
parserFactory = null; } private JsonReader createReader(String jsonInput) { InputStream input = new ByteArrayInputStream(Utf8.toBytes(jsonInput)); return new JsonReader(types, input, parserFactory); } @Test public void readSingleDocumentPut() { JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',", " 'fields': {", " 'something': 'smoketest',", " 'flag': true,", " 'nalle': 'bamse'", " }", "}")); DocumentPut put = (DocumentPut) r.readSingleDocument(DocumentOperationType.PUT, "id:unittest:smoke::doc1").operation(); smokeTestDoc(put.getDocument()); } @Test public final void readSingleDocumentUpdate() { JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',", " 'fields': {", " 'something': {", " 'assign': 'orOther' }}}")); DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentOperationType.UPDATE, "id:unittest:smoke::whee").operation(); FieldUpdate f = doc.getFieldUpdate("something"); assertEquals(1, f.size()); assertTrue(f.getValueUpdate(0) instanceof AssignValueUpdate); } @Test public void readClearField() { JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',", " 'fields': {", " 'int1': {", " 'assign': null }}}")); DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentOperationType.UPDATE, "id:unittest:smoke::whee").operation(); FieldUpdate f = doc.getFieldUpdate("int1"); assertEquals(1, f.size()); assertTrue(f.getValueUpdate(0) instanceof ClearValueUpdate); assertNull(f.getValueUpdate(0).getValue()); } @Test public void smokeTest() throws IOException { JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',", " 'fields': {", " 'something': 'smoketest',", " 'flag': true,", " 'nalle': 'bamse'", " }", "}")); DocumentParseInfo parseInfo = r.parseDocument().get(); DocumentType docType = r.readDocumentType(parseInfo.documentId); DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); new 
VespaJsonDocumentReader(false).readPut(parseInfo.fieldsBuffer, put); smokeTestDoc(put.getDocument()); } @Test public void docIdLookaheadTest() throws IOException { JsonReader r = createReader(inputJson( "{ 'fields': {", " 'something': 'smoketest',", " 'flag': true,", " 'nalle': 'bamse'", " },", " 'put': 'id:unittest:smoke::doc1'", " }", "}")); DocumentParseInfo parseInfo = r.parseDocument().get(); DocumentType docType = r.readDocumentType(parseInfo.documentId); DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); new VespaJsonDocumentReader(false).readPut(parseInfo.fieldsBuffer, put); smokeTestDoc(put.getDocument()); }
class JsonReaderTestCase { private DocumentTypeManager types; private JsonFactory parserFactory; @Before public void setUp() throws Exception { parserFactory = new JsonFactory(); types = new DocumentTypeManager(); { DocumentType x = new DocumentType("smoke"); x.addField(new Field("something", DataType.STRING)); x.addField(new Field("nalle", DataType.STRING)); x.addField(new Field("field1", DataType.STRING)); x.addField(new Field("field2", DataType.STRING)); x.addField(new Field("int1", DataType.INT)); x.addField(new Field("flag", DataType.BOOL)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("mirrors"); StructDataType woo = new StructDataType("woo"); woo.addField(new Field("sandra", DataType.STRING)); woo.addField(new Field("cloud", DataType.STRING)); x.addField(new Field("skuggsjaa", woo)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testarray"); DataType d = new ArrayDataType(DataType.STRING); x.addField(new Field("actualarray", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testset"); DataType d = new WeightedSetDataType(DataType.STRING, true, true); x.addField(new Field("actualset", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testmap"); DataType d = new MapDataType(DataType.STRING, DataType.STRING); x.addField(new Field("actualmap", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testraw"); DataType d = DataType.RAW; x.addField(new Field("actualraw", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testMapStringToArrayOfInt"); DataType value = new ArrayDataType(DataType.INT); DataType d = new MapDataType(DataType.STRING, value); x.addField(new Field("actualMapStringToArrayOfInt", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testsinglepos"); DataType d = PositionDataType.INSTANCE; x.addField(new Field("singlepos", d)); x.addField(new Field("geopos", new GeoPosType(8))); 
types.registerDocumentType(x); } { DocumentType x = new DocumentType("testtensor"); x.addField(new Field("sparse_single_dimension_tensor", new TensorDataType(new TensorType.Builder().mapped("x").build()))); x.addField(new Field("sparse_tensor", new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").build()))); x.addField(new Field("dense_tensor", new TensorDataType(new TensorType.Builder().indexed("x", 2).indexed("y", 3).build()))); x.addField(new Field("dense_int8_tensor", new TensorDataType(TensorType.fromSpec("tensor<int8>(x[2],y[3])")))); x.addField(new Field("dense_unbound_tensor", new TensorDataType(new TensorType.Builder().indexed("x").indexed("y").build()))); x.addField(new Field("mixed_tensor", new TensorDataType(new TensorType.Builder().mapped("x").indexed("y", 3).build()))); x.addField(new Field("mixed_bfloat16_tensor", new TensorDataType(TensorType.fromSpec("tensor<bfloat16>(x{},y[3])")))); x.addField(new Field("mixed_tensor_adv", new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").mapped("z").indexed("a", 3).build()))); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testpredicate"); x.addField(new Field("boolean", DataType.PREDICATE)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testint"); x.addField(new Field("integerfield", DataType.INT)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testnull"); x.addField(new Field("intfield", DataType.INT)); x.addField(new Field("stringfield", DataType.STRING)); x.addField(new Field("arrayfield", new ArrayDataType(DataType.STRING))); x.addField(new Field("weightedsetfield", new WeightedSetDataType(DataType.STRING, true, true))); x.addField(new Field("mapfield", new MapDataType(DataType.STRING, DataType.STRING))); x.addField(new Field("tensorfield", new TensorDataType(new TensorType.Builder().indexed("x").build()))); types.registerDocumentType(x); } } @After public void tearDown() throws Exception { types = null; 
parserFactory = null; } private JsonReader createReader(String jsonInput) { InputStream input = new ByteArrayInputStream(Utf8.toBytes(jsonInput)); return new JsonReader(types, input, parserFactory); } @Test public void readSingleDocumentPut() { JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',", " 'fields': {", " 'something': 'smoketest',", " 'flag': true,", " 'nalle': 'bamse'", " }", "}")); DocumentPut put = (DocumentPut) r.readSingleDocument(DocumentOperationType.PUT, "id:unittest:smoke::doc1").operation(); smokeTestDoc(put.getDocument()); } @Test public final void readSingleDocumentUpdate() { JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',", " 'fields': {", " 'something': {", " 'assign': 'orOther' }}}")); DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentOperationType.UPDATE, "id:unittest:smoke::whee").operation(); FieldUpdate f = doc.getFieldUpdate("something"); assertEquals(1, f.size()); assertTrue(f.getValueUpdate(0) instanceof AssignValueUpdate); } @Test public void readClearField() { JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',", " 'fields': {", " 'int1': {", " 'assign': null }}}")); DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentOperationType.UPDATE, "id:unittest:smoke::whee").operation(); FieldUpdate f = doc.getFieldUpdate("int1"); assertEquals(1, f.size()); assertTrue(f.getValueUpdate(0) instanceof ClearValueUpdate); assertNull(f.getValueUpdate(0).getValue()); } @Test public void smokeTest() throws IOException { JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',", " 'fields': {", " 'something': 'smoketest',", " 'flag': true,", " 'nalle': 'bamse'", " }", "}")); DocumentParseInfo parseInfo = r.parseDocument().get(); DocumentType docType = r.readDocumentType(parseInfo.documentId); DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); new 
VespaJsonDocumentReader(false).readPut(parseInfo.fieldsBuffer, put); smokeTestDoc(put.getDocument()); } @Test public void docIdLookaheadTest() throws IOException { JsonReader r = createReader(inputJson( "{ 'fields': {", " 'something': 'smoketest',", " 'flag': true,", " 'nalle': 'bamse'", " },", " 'put': 'id:unittest:smoke::doc1'", " }", "}")); DocumentParseInfo parseInfo = r.parseDocument().get(); DocumentType docType = r.readDocumentType(parseInfo.documentId); DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); new VespaJsonDocumentReader(false).readPut(parseInfo.fieldsBuffer, put); smokeTestDoc(put.getDocument()); }
No, thanks!
public void nonExistingFieldsCanBeIgnoredInUpdate() throws IOException{ JsonReader r = createReader(inputJson( "{ ", " 'update': 'id:unittest:smoke::doc1',", " 'fields': {", " 'nonexisting1': { 'assign': 'ignored value' },", " 'field1': { 'assign': 'value1' },", " 'field2': { 'assign': 'value2' }", " }", "}")); DocumentParseInfo parseInfo = r.parseDocument().get(); DocumentType docType = r.readDocumentType(parseInfo.documentId); DocumentUpdate update = new DocumentUpdate(docType, parseInfo.documentId); boolean fullyApplied = new VespaJsonDocumentReader(true).readUpdate(parseInfo.fieldsBuffer, update); assertFalse(fullyApplied); assertNull(update.getFieldUpdate("nonexisting1")); assertEquals("value1", update.getFieldUpdate("field1").getValueUpdates().get(0).getValue().getWrappedValue().toString()); assertNull(update.getFieldUpdate("nonexisting2")); assertEquals("value2", update.getFieldUpdate("field2").getValueUpdates().get(0).getValue().getWrappedValue().toString()); assertNull(update.getFieldUpdate("nonexisting3")); }
public void nonExistingFieldsCanBeIgnoredInUpdate() throws IOException{ JsonReader r = createReader(inputJson( "{ ", " 'update': 'id:unittest:smoke::doc1',", " 'fields': {", " 'nonexisting1': { 'assign': 'ignored value' },", " 'field1': { 'assign': 'value1' },", " 'nonexisting2': { " + " 'assign': {", " 'blocks': {", " 'a':[2.0,3.0],", " 'b':[4.0,5.0]", " }", " }", " },", " 'field2': { 'assign': 'value2' },", " 'nonexisting3': { 'assign': 'ignored value' }", " }", "}")); DocumentParseInfo parseInfo = r.parseDocument().get(); DocumentType docType = r.readDocumentType(parseInfo.documentId); DocumentUpdate update = new DocumentUpdate(docType, parseInfo.documentId); boolean fullyApplied = new VespaJsonDocumentReader(true).readUpdate(parseInfo.fieldsBuffer, update); assertFalse(fullyApplied); assertNull(update.getFieldUpdate("nonexisting1")); assertEquals("value1", update.getFieldUpdate("field1").getValueUpdates().get(0).getValue().getWrappedValue().toString()); assertNull(update.getFieldUpdate("nonexisting2")); assertEquals("value2", update.getFieldUpdate("field2").getValueUpdates().get(0).getValue().getWrappedValue().toString()); assertNull(update.getFieldUpdate("nonexisting3")); }
class JsonReaderTestCase { private DocumentTypeManager types; private JsonFactory parserFactory; @Before public void setUp() throws Exception { parserFactory = new JsonFactory(); types = new DocumentTypeManager(); { DocumentType x = new DocumentType("smoke"); x.addField(new Field("something", DataType.STRING)); x.addField(new Field("nalle", DataType.STRING)); x.addField(new Field("field1", DataType.STRING)); x.addField(new Field("field2", DataType.STRING)); x.addField(new Field("int1", DataType.INT)); x.addField(new Field("flag", DataType.BOOL)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("mirrors"); StructDataType woo = new StructDataType("woo"); woo.addField(new Field("sandra", DataType.STRING)); woo.addField(new Field("cloud", DataType.STRING)); x.addField(new Field("skuggsjaa", woo)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testarray"); DataType d = new ArrayDataType(DataType.STRING); x.addField(new Field("actualarray", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testset"); DataType d = new WeightedSetDataType(DataType.STRING, true, true); x.addField(new Field("actualset", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testmap"); DataType d = new MapDataType(DataType.STRING, DataType.STRING); x.addField(new Field("actualmap", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testraw"); DataType d = DataType.RAW; x.addField(new Field("actualraw", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testMapStringToArrayOfInt"); DataType value = new ArrayDataType(DataType.INT); DataType d = new MapDataType(DataType.STRING, value); x.addField(new Field("actualMapStringToArrayOfInt", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testsinglepos"); DataType d = PositionDataType.INSTANCE; x.addField(new Field("singlepos", d)); x.addField(new Field("geopos", new GeoPosType(8))); 
types.registerDocumentType(x); } { DocumentType x = new DocumentType("testtensor"); x.addField(new Field("sparse_single_dimension_tensor", new TensorDataType(new TensorType.Builder().mapped("x").build()))); x.addField(new Field("sparse_tensor", new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").build()))); x.addField(new Field("dense_tensor", new TensorDataType(new TensorType.Builder().indexed("x", 2).indexed("y", 3).build()))); x.addField(new Field("dense_int8_tensor", new TensorDataType(TensorType.fromSpec("tensor<int8>(x[2],y[3])")))); x.addField(new Field("dense_unbound_tensor", new TensorDataType(new TensorType.Builder().indexed("x").indexed("y").build()))); x.addField(new Field("mixed_tensor", new TensorDataType(new TensorType.Builder().mapped("x").indexed("y", 3).build()))); x.addField(new Field("mixed_bfloat16_tensor", new TensorDataType(TensorType.fromSpec("tensor<bfloat16>(x{},y[3])")))); x.addField(new Field("mixed_tensor_adv", new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").mapped("z").indexed("a", 3).build()))); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testpredicate"); x.addField(new Field("boolean", DataType.PREDICATE)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testint"); x.addField(new Field("integerfield", DataType.INT)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testnull"); x.addField(new Field("intfield", DataType.INT)); x.addField(new Field("stringfield", DataType.STRING)); x.addField(new Field("arrayfield", new ArrayDataType(DataType.STRING))); x.addField(new Field("weightedsetfield", new WeightedSetDataType(DataType.STRING, true, true))); x.addField(new Field("mapfield", new MapDataType(DataType.STRING, DataType.STRING))); x.addField(new Field("tensorfield", new TensorDataType(new TensorType.Builder().indexed("x").build()))); types.registerDocumentType(x); } } @After public void tearDown() throws Exception { types = null; 
parserFactory = null; } private JsonReader createReader(String jsonInput) { InputStream input = new ByteArrayInputStream(Utf8.toBytes(jsonInput)); return new JsonReader(types, input, parserFactory); } @Test public void readSingleDocumentPut() { JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',", " 'fields': {", " 'something': 'smoketest',", " 'flag': true,", " 'nalle': 'bamse'", " }", "}")); DocumentPut put = (DocumentPut) r.readSingleDocument(DocumentOperationType.PUT, "id:unittest:smoke::doc1").operation(); smokeTestDoc(put.getDocument()); } @Test public final void readSingleDocumentUpdate() { JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',", " 'fields': {", " 'something': {", " 'assign': 'orOther' }}}")); DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentOperationType.UPDATE, "id:unittest:smoke::whee").operation(); FieldUpdate f = doc.getFieldUpdate("something"); assertEquals(1, f.size()); assertTrue(f.getValueUpdate(0) instanceof AssignValueUpdate); } @Test public void readClearField() { JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',", " 'fields': {", " 'int1': {", " 'assign': null }}}")); DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentOperationType.UPDATE, "id:unittest:smoke::whee").operation(); FieldUpdate f = doc.getFieldUpdate("int1"); assertEquals(1, f.size()); assertTrue(f.getValueUpdate(0) instanceof ClearValueUpdate); assertNull(f.getValueUpdate(0).getValue()); } @Test public void smokeTest() throws IOException { JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',", " 'fields': {", " 'something': 'smoketest',", " 'flag': true,", " 'nalle': 'bamse'", " }", "}")); DocumentParseInfo parseInfo = r.parseDocument().get(); DocumentType docType = r.readDocumentType(parseInfo.documentId); DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); new 
VespaJsonDocumentReader(false).readPut(parseInfo.fieldsBuffer, put); smokeTestDoc(put.getDocument()); } @Test public void docIdLookaheadTest() throws IOException { JsonReader r = createReader(inputJson( "{ 'fields': {", " 'something': 'smoketest',", " 'flag': true,", " 'nalle': 'bamse'", " },", " 'put': 'id:unittest:smoke::doc1'", " }", "}")); DocumentParseInfo parseInfo = r.parseDocument().get(); DocumentType docType = r.readDocumentType(parseInfo.documentId); DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); new VespaJsonDocumentReader(false).readPut(parseInfo.fieldsBuffer, put); smokeTestDoc(put.getDocument()); }
class JsonReaderTestCase { private DocumentTypeManager types; private JsonFactory parserFactory; @Before public void setUp() throws Exception { parserFactory = new JsonFactory(); types = new DocumentTypeManager(); { DocumentType x = new DocumentType("smoke"); x.addField(new Field("something", DataType.STRING)); x.addField(new Field("nalle", DataType.STRING)); x.addField(new Field("field1", DataType.STRING)); x.addField(new Field("field2", DataType.STRING)); x.addField(new Field("int1", DataType.INT)); x.addField(new Field("flag", DataType.BOOL)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("mirrors"); StructDataType woo = new StructDataType("woo"); woo.addField(new Field("sandra", DataType.STRING)); woo.addField(new Field("cloud", DataType.STRING)); x.addField(new Field("skuggsjaa", woo)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testarray"); DataType d = new ArrayDataType(DataType.STRING); x.addField(new Field("actualarray", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testset"); DataType d = new WeightedSetDataType(DataType.STRING, true, true); x.addField(new Field("actualset", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testmap"); DataType d = new MapDataType(DataType.STRING, DataType.STRING); x.addField(new Field("actualmap", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testraw"); DataType d = DataType.RAW; x.addField(new Field("actualraw", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testMapStringToArrayOfInt"); DataType value = new ArrayDataType(DataType.INT); DataType d = new MapDataType(DataType.STRING, value); x.addField(new Field("actualMapStringToArrayOfInt", d)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testsinglepos"); DataType d = PositionDataType.INSTANCE; x.addField(new Field("singlepos", d)); x.addField(new Field("geopos", new GeoPosType(8))); 
types.registerDocumentType(x); } { DocumentType x = new DocumentType("testtensor"); x.addField(new Field("sparse_single_dimension_tensor", new TensorDataType(new TensorType.Builder().mapped("x").build()))); x.addField(new Field("sparse_tensor", new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").build()))); x.addField(new Field("dense_tensor", new TensorDataType(new TensorType.Builder().indexed("x", 2).indexed("y", 3).build()))); x.addField(new Field("dense_int8_tensor", new TensorDataType(TensorType.fromSpec("tensor<int8>(x[2],y[3])")))); x.addField(new Field("dense_unbound_tensor", new TensorDataType(new TensorType.Builder().indexed("x").indexed("y").build()))); x.addField(new Field("mixed_tensor", new TensorDataType(new TensorType.Builder().mapped("x").indexed("y", 3).build()))); x.addField(new Field("mixed_bfloat16_tensor", new TensorDataType(TensorType.fromSpec("tensor<bfloat16>(x{},y[3])")))); x.addField(new Field("mixed_tensor_adv", new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").mapped("z").indexed("a", 3).build()))); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testpredicate"); x.addField(new Field("boolean", DataType.PREDICATE)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testint"); x.addField(new Field("integerfield", DataType.INT)); types.registerDocumentType(x); } { DocumentType x = new DocumentType("testnull"); x.addField(new Field("intfield", DataType.INT)); x.addField(new Field("stringfield", DataType.STRING)); x.addField(new Field("arrayfield", new ArrayDataType(DataType.STRING))); x.addField(new Field("weightedsetfield", new WeightedSetDataType(DataType.STRING, true, true))); x.addField(new Field("mapfield", new MapDataType(DataType.STRING, DataType.STRING))); x.addField(new Field("tensorfield", new TensorDataType(new TensorType.Builder().indexed("x").build()))); types.registerDocumentType(x); } } @After public void tearDown() throws Exception { types = null; 
parserFactory = null; } private JsonReader createReader(String jsonInput) { InputStream input = new ByteArrayInputStream(Utf8.toBytes(jsonInput)); return new JsonReader(types, input, parserFactory); } @Test public void readSingleDocumentPut() { JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',", " 'fields': {", " 'something': 'smoketest',", " 'flag': true,", " 'nalle': 'bamse'", " }", "}")); DocumentPut put = (DocumentPut) r.readSingleDocument(DocumentOperationType.PUT, "id:unittest:smoke::doc1").operation(); smokeTestDoc(put.getDocument()); } @Test public final void readSingleDocumentUpdate() { JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',", " 'fields': {", " 'something': {", " 'assign': 'orOther' }}}")); DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentOperationType.UPDATE, "id:unittest:smoke::whee").operation(); FieldUpdate f = doc.getFieldUpdate("something"); assertEquals(1, f.size()); assertTrue(f.getValueUpdate(0) instanceof AssignValueUpdate); } @Test public void readClearField() { JsonReader r = createReader(inputJson("{ 'update': 'id:unittest:smoke::whee',", " 'fields': {", " 'int1': {", " 'assign': null }}}")); DocumentUpdate doc = (DocumentUpdate) r.readSingleDocument(DocumentOperationType.UPDATE, "id:unittest:smoke::whee").operation(); FieldUpdate f = doc.getFieldUpdate("int1"); assertEquals(1, f.size()); assertTrue(f.getValueUpdate(0) instanceof ClearValueUpdate); assertNull(f.getValueUpdate(0).getValue()); } @Test public void smokeTest() throws IOException { JsonReader r = createReader(inputJson("{ 'put': 'id:unittest:smoke::doc1',", " 'fields': {", " 'something': 'smoketest',", " 'flag': true,", " 'nalle': 'bamse'", " }", "}")); DocumentParseInfo parseInfo = r.parseDocument().get(); DocumentType docType = r.readDocumentType(parseInfo.documentId); DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); new 
VespaJsonDocumentReader(false).readPut(parseInfo.fieldsBuffer, put); smokeTestDoc(put.getDocument()); } @Test public void docIdLookaheadTest() throws IOException { JsonReader r = createReader(inputJson( "{ 'fields': {", " 'something': 'smoketest',", " 'flag': true,", " 'nalle': 'bamse'", " },", " 'put': 'id:unittest:smoke::doc1'", " }", "}")); DocumentParseInfo parseInfo = r.parseDocument().get(); DocumentType docType = r.readDocumentType(parseInfo.documentId); DocumentPut put = new DocumentPut(new Document(docType, parseInfo.documentId)); new VespaJsonDocumentReader(false).readPut(parseInfo.fieldsBuffer, put); smokeTestDoc(put.getDocument()); }
Yes.
private boolean addFieldPathUpdates(DocumentUpdate update, TokenBuffer buffer, String fieldPath) { int localNesting = buffer.nesting(); buffer.next(); while (localNesting <= buffer.nesting()) { String fieldPathOperation = buffer.currentName().toLowerCase(); FieldPathUpdate fieldPathUpdate; if (fieldPathOperation.equals(UPDATE_ASSIGN)) { fieldPathUpdate = readAssignFieldPathUpdate(update.getType(), fieldPath, buffer); } else if (fieldPathOperation.equals(UPDATE_ADD)) { fieldPathUpdate = readAddFieldPathUpdate(update.getType(), fieldPath, buffer); } else if (fieldPathOperation.equals(UPDATE_REMOVE)) { fieldPathUpdate = readRemoveFieldPathUpdate(update.getType(), fieldPath, buffer); } else if (SingleValueReader.UPDATE_OPERATION_TO_ARITHMETIC_SIGN.containsKey(fieldPathOperation)) { fieldPathUpdate = readArithmeticFieldPathUpdate(update.getType(), fieldPath, buffer, fieldPathOperation); } else { throw new IllegalArgumentException("Field path update type '" + fieldPathOperation + "' not supported."); } update.addFieldPathUpdate(fieldPathUpdate); buffer.next(); } return true; }
return true;
private boolean addFieldPathUpdates(DocumentUpdate update, TokenBuffer buffer, String fieldPath) { int localNesting = buffer.nesting(); buffer.next(); while (localNesting <= buffer.nesting()) { String fieldPathOperation = buffer.currentName().toLowerCase(); FieldPathUpdate fieldPathUpdate; if (fieldPathOperation.equals(UPDATE_ASSIGN)) { fieldPathUpdate = readAssignFieldPathUpdate(update.getType(), fieldPath, buffer); } else if (fieldPathOperation.equals(UPDATE_ADD)) { fieldPathUpdate = readAddFieldPathUpdate(update.getType(), fieldPath, buffer); } else if (fieldPathOperation.equals(UPDATE_REMOVE)) { fieldPathUpdate = readRemoveFieldPathUpdate(update.getType(), fieldPath, buffer); } else if (SingleValueReader.UPDATE_OPERATION_TO_ARITHMETIC_SIGN.containsKey(fieldPathOperation)) { fieldPathUpdate = readArithmeticFieldPathUpdate(update.getType(), fieldPath, buffer, fieldPathOperation); } else { throw new IllegalArgumentException("Field path update type '" + fieldPathOperation + "' not supported."); } update.addFieldPathUpdate(fieldPathUpdate); buffer.next(); } return true; }
class VespaJsonDocumentReader { private static final String UPDATE_REMOVE = "remove"; private static final String UPDATE_ADD = "add"; private final boolean ignoreUndefinedFields; public VespaJsonDocumentReader(boolean ignoreUndefinedFields) { this.ignoreUndefinedFields = ignoreUndefinedFields; } public ParsedDocumentOperation createDocumentOperation(DocumentType documentType, DocumentParseInfo documentParseInfo) { final DocumentOperation documentOperation; boolean fullyApplied = true; try { switch (documentParseInfo.operationType) { case PUT -> { documentOperation = new DocumentPut(new Document(documentType, documentParseInfo.documentId)); fullyApplied = readPut(documentParseInfo.fieldsBuffer, (DocumentPut) documentOperation); verifyEndState(documentParseInfo.fieldsBuffer, JsonToken.END_OBJECT); } case REMOVE -> documentOperation = new DocumentRemove(documentParseInfo.documentId); case UPDATE -> { documentOperation = new DocumentUpdate(documentType, documentParseInfo.documentId); fullyApplied = readUpdate(documentParseInfo.fieldsBuffer, (DocumentUpdate) documentOperation); verifyEndState(documentParseInfo.fieldsBuffer, JsonToken.END_OBJECT); } default -> throw new IllegalStateException("Implementation out of sync with itself. This is a bug."); } } catch (JsonReaderException e) { throw JsonReaderException.addDocId(e, documentParseInfo.documentId); } if (documentParseInfo.create.isPresent()) { if (! 
(documentOperation instanceof DocumentUpdate update)) { throw new IllegalArgumentException("Could not set create flag on non update operation."); } update.setCreateIfNonExistent(documentParseInfo.create.get()); } return new ParsedDocumentOperation(documentOperation, fullyApplied); } public boolean readPut(TokenBuffer buffer, DocumentPut put) { try { if (buffer.isEmpty()) throw new IllegalArgumentException(put + " is missing a 'fields' map"); return populateComposite(buffer, put.getDocument(), ignoreUndefinedFields); } catch (JsonReaderException e) { throw JsonReaderException.addDocId(e, put.getId()); } } public boolean readUpdate(TokenBuffer buffer, DocumentUpdate update) { if (buffer.isEmpty()) throw new IllegalArgumentException("Update of document " + update.getId() + " is missing a 'fields' map"); expectObjectStart(buffer.currentToken()); int localNesting = buffer.nesting(); buffer.next(); boolean fullyApplied = true; while (localNesting <= buffer.nesting()) { expectObjectStart(buffer.currentToken()); String fieldName = buffer.currentName(); try { if (isFieldPath(fieldName)) { fullyApplied &= addFieldPathUpdates(update, buffer, fieldName); } else { fullyApplied &= addFieldUpdates(update, buffer, fieldName); } expectObjectEnd(buffer.currentToken()); } catch (IllegalArgumentException | IndexOutOfBoundsException e) { throw new IllegalArgumentException("Error in '" + fieldName + "'", e); } buffer.next(); } return fullyApplied; } private boolean addFieldUpdates(DocumentUpdate update, TokenBuffer buffer, String fieldName) { Field field = update.getType().getField(fieldName); if (field == null) { if (! 
ignoreUndefinedFields) throw new IllegalArgumentException("No field named '" + fieldName + "' in " + update.getType()); buffer.skipToRelativeNesting(-1); return false; } int localNesting = buffer.nesting(); FieldUpdate fieldUpdate = FieldUpdate.create(field); buffer.next(); while (localNesting <= buffer.nesting()) { switch (buffer.currentName()) { case UPDATE_REMOVE: if (isTensorField(field)) { fieldUpdate.addValueUpdate(createTensorRemoveUpdate(buffer, field)); } else { createRemoves(buffer, field, fieldUpdate, ignoreUndefinedFields); } break; case UPDATE_ADD: if (isTensorField(field)) { fieldUpdate.addValueUpdate(createTensorAddUpdate(buffer, field)); } else { createAdds(buffer, field, fieldUpdate, ignoreUndefinedFields); } break; case UPDATE_MATCH: fieldUpdate.addValueUpdate(createMapUpdate(buffer, field, ignoreUndefinedFields)); break; case UPDATE_MODIFY: fieldUpdate.addValueUpdate(createModifyUpdate(buffer, field)); break; default: String action = buffer.currentName(); fieldUpdate.addValueUpdate(readSingleUpdate(buffer, field.getDataType(), action, ignoreUndefinedFields)); } buffer.next(); } update.addFieldUpdate(fieldUpdate); return true; } private AssignFieldPathUpdate readAssignFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer) { AssignFieldPathUpdate fieldPathUpdate = new AssignFieldPathUpdate(documentType, fieldPath); FieldValue fv = SingleValueReader.readSingleValue(buffer, fieldPathUpdate.getFieldPath().getResultingDataType(), ignoreUndefinedFields); fieldPathUpdate.setNewValue(fv); return fieldPathUpdate; } private AddFieldPathUpdate readAddFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer) { AddFieldPathUpdate fieldPathUpdate = new AddFieldPathUpdate(documentType, fieldPath); FieldValue fv = SingleValueReader.readSingleValue(buffer, fieldPathUpdate.getFieldPath().getResultingDataType(), ignoreUndefinedFields); fieldPathUpdate.setNewValues((Array) fv); return fieldPathUpdate; } private 
RemoveFieldPathUpdate readRemoveFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer) { expectScalarValue(buffer.currentToken()); return new RemoveFieldPathUpdate(documentType, fieldPath); } private AssignFieldPathUpdate readArithmeticFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer, String fieldPathOperation) { AssignFieldPathUpdate fieldPathUpdate = new AssignFieldPathUpdate(documentType, fieldPath); String arithmeticSign = SingleValueReader.UPDATE_OPERATION_TO_ARITHMETIC_SIGN.get(fieldPathOperation); double value = Double.valueOf(buffer.currentText()); String expression = String.format("$value %s %s", arithmeticSign, value); fieldPathUpdate.setExpression(expression); return fieldPathUpdate; } private static boolean isFieldPath(String field) { return field.matches("^.*?[.\\[\\{].*$"); } private static void verifyEndState(TokenBuffer buffer, JsonToken expectedFinalToken) { Preconditions.checkState(buffer.currentToken() == expectedFinalToken, "Expected end of JSON struct (%s), got %s", expectedFinalToken, buffer.currentToken()); Preconditions.checkState(buffer.nesting() == 0, "Nesting not zero at end of operation"); Preconditions.checkState(buffer.next() == null, "Dangling data at end of operation"); Preconditions.checkState(buffer.size() == 0, "Dangling data at end of operation"); } }
class VespaJsonDocumentReader { private static final String UPDATE_REMOVE = "remove"; private static final String UPDATE_ADD = "add"; private final boolean ignoreUndefinedFields; public VespaJsonDocumentReader(boolean ignoreUndefinedFields) { this.ignoreUndefinedFields = ignoreUndefinedFields; } public ParsedDocumentOperation createDocumentOperation(DocumentType documentType, DocumentParseInfo documentParseInfo) { final DocumentOperation documentOperation; boolean fullyApplied = true; try { switch (documentParseInfo.operationType) { case PUT -> { documentOperation = new DocumentPut(new Document(documentType, documentParseInfo.documentId)); fullyApplied = readPut(documentParseInfo.fieldsBuffer, (DocumentPut) documentOperation); verifyEndState(documentParseInfo.fieldsBuffer, JsonToken.END_OBJECT); } case REMOVE -> documentOperation = new DocumentRemove(documentParseInfo.documentId); case UPDATE -> { documentOperation = new DocumentUpdate(documentType, documentParseInfo.documentId); fullyApplied = readUpdate(documentParseInfo.fieldsBuffer, (DocumentUpdate) documentOperation); verifyEndState(documentParseInfo.fieldsBuffer, JsonToken.END_OBJECT); } default -> throw new IllegalStateException("Implementation out of sync with itself. This is a bug."); } } catch (JsonReaderException e) { throw JsonReaderException.addDocId(e, documentParseInfo.documentId); } if (documentParseInfo.create.isPresent()) { if (! 
(documentOperation instanceof DocumentUpdate update)) { throw new IllegalArgumentException("Could not set create flag on non update operation."); } update.setCreateIfNonExistent(documentParseInfo.create.get()); } return new ParsedDocumentOperation(documentOperation, fullyApplied); } public boolean readPut(TokenBuffer buffer, DocumentPut put) { try { if (buffer.isEmpty()) throw new IllegalArgumentException(put + " is missing a 'fields' map"); return populateComposite(buffer, put.getDocument(), ignoreUndefinedFields); } catch (JsonReaderException e) { throw JsonReaderException.addDocId(e, put.getId()); } } public boolean readUpdate(TokenBuffer buffer, DocumentUpdate update) { if (buffer.isEmpty()) throw new IllegalArgumentException("Update of document " + update.getId() + " is missing a 'fields' map"); expectObjectStart(buffer.currentToken()); int localNesting = buffer.nesting(); buffer.next(); boolean fullyApplied = true; while (localNesting <= buffer.nesting()) { expectObjectStart(buffer.currentToken()); String fieldName = buffer.currentName(); try { if (isFieldPath(fieldName)) { fullyApplied &= addFieldPathUpdates(update, buffer, fieldName); } else { fullyApplied &= addFieldUpdates(update, buffer, fieldName); } expectObjectEnd(buffer.currentToken()); } catch (IllegalArgumentException | IndexOutOfBoundsException e) { throw new IllegalArgumentException("Error in '" + fieldName + "'", e); } buffer.next(); } return fullyApplied; } private boolean addFieldUpdates(DocumentUpdate update, TokenBuffer buffer, String fieldName) { Field field = update.getType().getField(fieldName); if (field == null) { if (! 
ignoreUndefinedFields) throw new IllegalArgumentException("No field named '" + fieldName + "' in " + update.getType()); buffer.skipToRelativeNesting(-1); return false; } int localNesting = buffer.nesting(); FieldUpdate fieldUpdate = FieldUpdate.create(field); buffer.next(); while (localNesting <= buffer.nesting()) { switch (buffer.currentName()) { case UPDATE_REMOVE: if (isTensorField(field)) { fieldUpdate.addValueUpdate(createTensorRemoveUpdate(buffer, field)); } else { createRemoves(buffer, field, fieldUpdate, ignoreUndefinedFields); } break; case UPDATE_ADD: if (isTensorField(field)) { fieldUpdate.addValueUpdate(createTensorAddUpdate(buffer, field)); } else { createAdds(buffer, field, fieldUpdate, ignoreUndefinedFields); } break; case UPDATE_MATCH: fieldUpdate.addValueUpdate(createMapUpdate(buffer, field, ignoreUndefinedFields)); break; case UPDATE_MODIFY: fieldUpdate.addValueUpdate(createModifyUpdate(buffer, field)); break; default: String action = buffer.currentName(); fieldUpdate.addValueUpdate(readSingleUpdate(buffer, field.getDataType(), action, ignoreUndefinedFields)); } buffer.next(); } update.addFieldUpdate(fieldUpdate); return true; } private AssignFieldPathUpdate readAssignFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer) { AssignFieldPathUpdate fieldPathUpdate = new AssignFieldPathUpdate(documentType, fieldPath); FieldValue fv = SingleValueReader.readSingleValue(buffer, fieldPathUpdate.getFieldPath().getResultingDataType(), ignoreUndefinedFields); fieldPathUpdate.setNewValue(fv); return fieldPathUpdate; } private AddFieldPathUpdate readAddFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer) { AddFieldPathUpdate fieldPathUpdate = new AddFieldPathUpdate(documentType, fieldPath); FieldValue fv = SingleValueReader.readSingleValue(buffer, fieldPathUpdate.getFieldPath().getResultingDataType(), ignoreUndefinedFields); fieldPathUpdate.setNewValues((Array) fv); return fieldPathUpdate; } private 
RemoveFieldPathUpdate readRemoveFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer) { expectScalarValue(buffer.currentToken()); return new RemoveFieldPathUpdate(documentType, fieldPath); } private AssignFieldPathUpdate readArithmeticFieldPathUpdate(DocumentType documentType, String fieldPath, TokenBuffer buffer, String fieldPathOperation) { AssignFieldPathUpdate fieldPathUpdate = new AssignFieldPathUpdate(documentType, fieldPath); String arithmeticSign = SingleValueReader.UPDATE_OPERATION_TO_ARITHMETIC_SIGN.get(fieldPathOperation); double value = Double.valueOf(buffer.currentText()); String expression = String.format("$value %s %s", arithmeticSign, value); fieldPathUpdate.setExpression(expression); return fieldPathUpdate; } private static boolean isFieldPath(String field) { return field.matches("^.*?[.\\[\\{].*$"); } private static void verifyEndState(TokenBuffer buffer, JsonToken expectedFinalToken) { Preconditions.checkState(buffer.currentToken() == expectedFinalToken, "Expected end of JSON struct (%s), got %s", expectedFinalToken, buffer.currentToken()); Preconditions.checkState(buffer.nesting() == 0, "Nesting not zero at end of operation"); Preconditions.checkState(buffer.next() == null, "Dangling data at end of operation"); Preconditions.checkState(buffer.size() == 0, "Dangling data at end of operation"); } }
I think this one got misplaced, should be on "all". And maybe deprecate and add note about removing in Vespa 9?
private static List<Tuple> collapseMetrics(MetricSnapshot snapshot, String consumer) { return switch (consumer) { case HEALTH_PATH -> collapseHealthMetrics(snapshot); case "all", METRICS_PATH -> flattenAllMetrics(snapshot); default -> throw new IllegalArgumentException("Unknown consumer '" + consumer + "'."); }; }
case HEALTH_PATH -> collapseHealthMetrics(snapshot);
private static List<Tuple> collapseMetrics(MetricSnapshot snapshot, String consumer) { return switch (consumer) { case HEALTH_PATH -> collapseHealthMetrics(snapshot); case "all", METRICS_PATH -> flattenAllMetrics(snapshot); default -> throw new IllegalArgumentException("Unknown consumer '" + consumer + "'."); }; }
class StateHandler extends AbstractRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); public static final String STATE_API_ROOT = "/state/v1"; private static final String METRICS_PATH = "metrics"; private static final String HISTOGRAMS_PATH = "metrics/histograms"; private static final String CONFIG_GENERATION_PATH = "config"; private static final String HEALTH_PATH = "health"; private static final String VERSION_PATH = "version"; private final static MetricDimensions NULL_DIMENSIONS = StateMetricContext.newInstance(null); private final StateMonitor monitor; private final Timer timer; private final byte[] config; private final SnapshotProvider snapshotProvider; @Inject public StateHandler(StateMonitor monitor, Timer timer, ApplicationMetadataConfig config, ComponentRegistry<SnapshotProvider> snapshotProviders) { this.monitor = monitor; this.timer = timer; this.config = buildConfigOutput(config); snapshotProvider = getSnapshotProviderOrThrow(snapshotProviders); } static SnapshotProvider getSnapshotProviderOrThrow(ComponentRegistry<SnapshotProvider> preprocessors) { List<SnapshotProvider> allPreprocessors = preprocessors.allComponents(); if (allPreprocessors.size() > 0) { return allPreprocessors.get(0); } else { throw new IllegalArgumentException("At least one snapshot provider is required."); } } @Override public ContentChannel handleRequest(Request request, ResponseHandler handler) { new ResponseDispatch() { @Override protected Response newResponse() { Response response = new Response(Response.Status.OK); response.headers().add(HttpHeaders.Names.CONTENT_TYPE, resolveContentType(request.getUri())); return response; } @Override protected Iterable<ByteBuffer> responseContent() { return Collections.singleton(buildContent(request.getUri())); } }.dispatch(handler); return null; } private String resolveContentType(URI requestUri) { if (resolvePath(requestUri).equals(HISTOGRAMS_PATH)) { return "text/plain; charset=utf-8"; } else { return 
"application/json"; } } private ByteBuffer buildContent(URI requestUri) { String suffix = resolvePath(requestUri); switch (suffix) { case "": return ByteBuffer.wrap(apiLinks(requestUri)); case CONFIG_GENERATION_PATH: return ByteBuffer.wrap(config); case HISTOGRAMS_PATH: return ByteBuffer.wrap(buildHistogramsOutput()); case HEALTH_PATH: case METRICS_PATH: return ByteBuffer.wrap(buildMetricOutput(suffix)); case VERSION_PATH: return ByteBuffer.wrap(buildVersionOutput()); default: return ByteBuffer.wrap(buildMetricOutput(suffix)); } } private byte[] apiLinks(URI requestUri) { try { int port = requestUri.getPort(); String host = requestUri.getHost(); StringBuilder base = new StringBuilder("http: base.append(host); if (port != -1) { base.append(":").append(port); } base.append(STATE_API_ROOT); String uriBase = base.toString(); ArrayNode linkList = jsonMapper.createArrayNode(); for (String api : new String[] {METRICS_PATH, CONFIG_GENERATION_PATH, HEALTH_PATH, VERSION_PATH}) { ObjectNode resource = jsonMapper.createObjectNode(); resource.put("url", uriBase + "/" + api); linkList.add(resource); } JsonNode resources = jsonMapper.createObjectNode().set("resources", linkList); return toPrettyString(resources); } catch (JsonProcessingException e) { throw new RuntimeException("Bad JSON construction", e); } } private static String resolvePath(URI uri) { String path = uri.getPath(); if (path.endsWith("/")) { path = path.substring(0, path.length() - 1); } if (path.startsWith(STATE_API_ROOT)) { path = path.substring(STATE_API_ROOT.length()); } if (path.startsWith("/")) { path = path.substring(1); } return path; } private static byte[] buildConfigOutput(ApplicationMetadataConfig config) { try { return toPrettyString( jsonMapper.createObjectNode() .set(CONFIG_GENERATION_PATH, jsonMapper.createObjectNode() .put("generation", config.generation()) .set("container", jsonMapper.createObjectNode() .put("generation", config.generation())))); } catch (JsonProcessingException e) { throw new 
RuntimeException("Bad JSON construction", e); } } private static byte[] buildVersionOutput() { try { return toPrettyString( jsonMapper.createObjectNode() .put("version", Vtag.currentVersion.toString())); } catch (JsonProcessingException e) { throw new RuntimeException("Bad JSON construction", e); } } private byte[] buildMetricOutput(String consumer) { try { return toPrettyString(buildJsonForConsumer(consumer)); } catch (JsonProcessingException e) { throw new RuntimeException("Bad JSON construction", e); } } private byte[] buildHistogramsOutput() { ByteArrayOutputStream baos = new ByteArrayOutputStream(); if (snapshotProvider != null) { snapshotProvider.histogram(new PrintStream(baos)); } return baos.toByteArray(); } private ObjectNode buildJsonForConsumer(String consumer) { ObjectNode ret = jsonMapper.createObjectNode(); ret.put("time", timer.currentTimeMillis()); ret.set("status", jsonMapper.createObjectNode().put("code", getStatus().name())); ret.set(METRICS_PATH, buildJsonForSnapshot(consumer, getSnapshot())); return ret; } private MetricSnapshot getSnapshot() { return snapshotProvider.latestSnapshot(); } private StateMonitor.Status getStatus() { return monitor.status(); } private ObjectNode buildJsonForSnapshot(String consumer, MetricSnapshot metricSnapshot) { if (metricSnapshot == null) { return jsonMapper.createObjectNode(); } ObjectNode jsonMetric = jsonMapper.createObjectNode(); jsonMetric.set("snapshot", jsonMapper.createObjectNode() .put("from", sanitizeDouble(metricSnapshot.getFromTime(TimeUnit.MILLISECONDS) / 1000.0)) .put("to", sanitizeDouble(metricSnapshot.getToTime(TimeUnit.MILLISECONDS) / 1000.0))); boolean includeDimensions = !consumer.equals(HEALTH_PATH); long periodInMillis = metricSnapshot.getToTime(TimeUnit.MILLISECONDS) - metricSnapshot.getFromTime(TimeUnit.MILLISECONDS); for (Tuple tuple : collapseMetrics(metricSnapshot, consumer)) { ObjectNode jsonTuple = jsonMapper.createObjectNode(); jsonTuple.put("name", tuple.key); if (tuple.val 
instanceof CountMetric) { CountMetric count = (CountMetric)tuple.val; jsonTuple.set("values", jsonMapper.createObjectNode() .put("count", count.getCount()) .put("rate", sanitizeDouble(count.getCount() * 1000.0) / periodInMillis)); } else if (tuple.val instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric) tuple.val; ObjectNode valueFields = jsonMapper.createObjectNode(); valueFields.put("average", sanitizeDouble(gauge.getAverage())) .put("sum", sanitizeDouble(gauge.getSum())) .put("count", gauge.getCount()) .put("last", sanitizeDouble(gauge.getLast())) .put("max", sanitizeDouble(gauge.getMax())) .put("min", sanitizeDouble(gauge.getMin())) .put("rate", sanitizeDouble((gauge.getCount() * 1000.0) / periodInMillis)); if (gauge.getPercentiles().isPresent()) { for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) { valueFields.put(prefixAndValue.first + "percentile", sanitizeDouble(prefixAndValue.second)); } } jsonTuple.set("values", valueFields); } else { throw new UnsupportedOperationException(tuple.val.getClass().getName()); } if (tuple.dim != null) { Iterator<Map.Entry<String, String>> it = tuple.dim.iterator(); if (it.hasNext() && includeDimensions) { ObjectNode jsonDim = jsonMapper.createObjectNode(); while (it.hasNext()) { Map.Entry<String, String> entry = it.next(); jsonDim.put(entry.getKey(), entry.getValue()); } jsonTuple.set("dimensions", jsonDim); } } ArrayNode values = (ArrayNode) jsonMetric.get("values"); if (values == null) { values = jsonMapper.createArrayNode(); jsonMetric.set("values", values); } values.add(jsonTuple); } return jsonMetric; } private static List<Tuple> collapseHealthMetrics(MetricSnapshot snapshot) { Tuple requestsPerSecond = new Tuple(NULL_DIMENSIONS, "requestsPerSecond", null); Tuple latencySeconds = new Tuple(NULL_DIMENSIONS, "latencySeconds", null); for (Map.Entry<MetricDimensions, MetricSet> entry : snapshot) { MetricSet metricSet = entry.getValue(); MetricValue val = 
metricSet.get("serverTotalSuccessfulResponseLatency"); if (val instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric)val; latencySeconds.add(GaugeMetric.newInstance(gauge.getLast() / 1000, gauge.getMax() / 1000, gauge.getMin() / 1000, gauge.getSum() / 1000, gauge.getCount())); } requestsPerSecond.add(metricSet.get("serverNumSuccessfulResponses")); } List<Tuple> lst = new ArrayList<>(); if (requestsPerSecond.val != null) { lst.add(requestsPerSecond); } if (latencySeconds.val != null) { lst.add(latencySeconds); } return lst; } /** Produces a flat list of metric entries from a snapshot (which organizes metrics by dimensions) */ static List<Tuple> flattenAllMetrics(MetricSnapshot snapshot) { List<Tuple> metrics = new ArrayList<>(); for (Map.Entry<MetricDimensions, MetricSet> snapshotEntry : snapshot) { for (Map.Entry<String, MetricValue> metricSetEntry : snapshotEntry.getValue()) { metrics.add(new Tuple(snapshotEntry.getKey(), metricSetEntry.getKey(), metricSetEntry.getValue())); } } return metrics; } private static byte[] toPrettyString(JsonNode resources) throws JsonProcessingException { return jsonMapper.writerWithDefaultPrettyPrinter() .writeValueAsString(resources) .getBytes(); } static class Tuple { final MetricDimensions dim; final String key; MetricValue val; Tuple(MetricDimensions dim, String key, MetricValue val) { this.dim = dim; this.key = key; this.val = val; } void add(MetricValue val) { if (val == null) { return; } if (this.val == null) { this.val = val; } else { this.val.add(val); } } } }
class StateHandler extends AbstractRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); public static final String STATE_API_ROOT = "/state/v1"; private static final String METRICS_PATH = "metrics"; private static final String HISTOGRAMS_PATH = "metrics/histograms"; private static final String CONFIG_GENERATION_PATH = "config"; private static final String HEALTH_PATH = "health"; private static final String VERSION_PATH = "version"; private final static MetricDimensions NULL_DIMENSIONS = StateMetricContext.newInstance(null); private final StateMonitor monitor; private final Timer timer; private final byte[] config; private final SnapshotProvider snapshotProvider; @Inject public StateHandler(StateMonitor monitor, Timer timer, ApplicationMetadataConfig config, ComponentRegistry<SnapshotProvider> snapshotProviders) { this.monitor = monitor; this.timer = timer; this.config = buildConfigOutput(config); snapshotProvider = getSnapshotProviderOrThrow(snapshotProviders); } static SnapshotProvider getSnapshotProviderOrThrow(ComponentRegistry<SnapshotProvider> preprocessors) { List<SnapshotProvider> allPreprocessors = preprocessors.allComponents(); if (allPreprocessors.size() > 0) { return allPreprocessors.get(0); } else { throw new IllegalArgumentException("At least one snapshot provider is required."); } } @Override public ContentChannel handleRequest(Request request, ResponseHandler handler) { new ResponseDispatch() { @Override protected Response newResponse() { Response response = new Response(Response.Status.OK); response.headers().add(HttpHeaders.Names.CONTENT_TYPE, resolveContentType(request.getUri())); return response; } @Override protected Iterable<ByteBuffer> responseContent() { return Collections.singleton(buildContent(request.getUri())); } }.dispatch(handler); return null; } private String resolveContentType(URI requestUri) { if (resolvePath(requestUri).equals(HISTOGRAMS_PATH)) { return "text/plain; charset=utf-8"; } else { return 
"application/json"; } } private ByteBuffer buildContent(URI requestUri) { String suffix = resolvePath(requestUri); switch (suffix) { case "": return ByteBuffer.wrap(apiLinks(requestUri)); case CONFIG_GENERATION_PATH: return ByteBuffer.wrap(config); case HISTOGRAMS_PATH: return ByteBuffer.wrap(buildHistogramsOutput()); case HEALTH_PATH: case METRICS_PATH: return ByteBuffer.wrap(buildMetricOutput(suffix)); case VERSION_PATH: return ByteBuffer.wrap(buildVersionOutput()); default: return ByteBuffer.wrap(buildMetricOutput(suffix)); } } private byte[] apiLinks(URI requestUri) { try { int port = requestUri.getPort(); String host = requestUri.getHost(); StringBuilder base = new StringBuilder("http: base.append(host); if (port != -1) { base.append(":").append(port); } base.append(STATE_API_ROOT); String uriBase = base.toString(); ArrayNode linkList = jsonMapper.createArrayNode(); for (String api : new String[] {METRICS_PATH, CONFIG_GENERATION_PATH, HEALTH_PATH, VERSION_PATH}) { ObjectNode resource = jsonMapper.createObjectNode(); resource.put("url", uriBase + "/" + api); linkList.add(resource); } JsonNode resources = jsonMapper.createObjectNode().set("resources", linkList); return toPrettyString(resources); } catch (JsonProcessingException e) { throw new RuntimeException("Bad JSON construction", e); } } private static String resolvePath(URI uri) { String path = uri.getPath(); if (path.endsWith("/")) { path = path.substring(0, path.length() - 1); } if (path.startsWith(STATE_API_ROOT)) { path = path.substring(STATE_API_ROOT.length()); } if (path.startsWith("/")) { path = path.substring(1); } return path; } private static byte[] buildConfigOutput(ApplicationMetadataConfig config) { try { return toPrettyString( jsonMapper.createObjectNode() .set(CONFIG_GENERATION_PATH, jsonMapper.createObjectNode() .put("generation", config.generation()) .set("container", jsonMapper.createObjectNode() .put("generation", config.generation())))); } catch (JsonProcessingException e) { throw new 
RuntimeException("Bad JSON construction", e); } } private static byte[] buildVersionOutput() { try { return toPrettyString( jsonMapper.createObjectNode() .put("version", Vtag.currentVersion.toString())); } catch (JsonProcessingException e) { throw new RuntimeException("Bad JSON construction", e); } } private byte[] buildMetricOutput(String consumer) { try { return toPrettyString(buildJsonForConsumer(consumer)); } catch (JsonProcessingException e) { throw new RuntimeException("Bad JSON construction", e); } } private byte[] buildHistogramsOutput() { ByteArrayOutputStream baos = new ByteArrayOutputStream(); if (snapshotProvider != null) { snapshotProvider.histogram(new PrintStream(baos)); } return baos.toByteArray(); } private ObjectNode buildJsonForConsumer(String consumer) { ObjectNode ret = jsonMapper.createObjectNode(); ret.put("time", timer.currentTimeMillis()); ret.set("status", jsonMapper.createObjectNode().put("code", getStatus().name())); ret.set(METRICS_PATH, buildJsonForSnapshot(consumer, getSnapshot())); return ret; } private MetricSnapshot getSnapshot() { return snapshotProvider.latestSnapshot(); } private StateMonitor.Status getStatus() { return monitor.status(); } private ObjectNode buildJsonForSnapshot(String consumer, MetricSnapshot metricSnapshot) { if (metricSnapshot == null) { return jsonMapper.createObjectNode(); } ObjectNode jsonMetric = jsonMapper.createObjectNode(); jsonMetric.set("snapshot", jsonMapper.createObjectNode() .put("from", sanitizeDouble(metricSnapshot.getFromTime(TimeUnit.MILLISECONDS) / 1000.0)) .put("to", sanitizeDouble(metricSnapshot.getToTime(TimeUnit.MILLISECONDS) / 1000.0))); boolean includeDimensions = !consumer.equals(HEALTH_PATH); long periodInMillis = metricSnapshot.getToTime(TimeUnit.MILLISECONDS) - metricSnapshot.getFromTime(TimeUnit.MILLISECONDS); for (Tuple tuple : collapseMetrics(metricSnapshot, consumer)) { ObjectNode jsonTuple = jsonMapper.createObjectNode(); jsonTuple.put("name", tuple.key); if (tuple.val 
instanceof CountMetric) { CountMetric count = (CountMetric)tuple.val; jsonTuple.set("values", jsonMapper.createObjectNode() .put("count", count.getCount()) .put("rate", sanitizeDouble(count.getCount() * 1000.0) / periodInMillis)); } else if (tuple.val instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric) tuple.val; ObjectNode valueFields = jsonMapper.createObjectNode(); valueFields.put("average", sanitizeDouble(gauge.getAverage())) .put("sum", sanitizeDouble(gauge.getSum())) .put("count", gauge.getCount()) .put("last", sanitizeDouble(gauge.getLast())) .put("max", sanitizeDouble(gauge.getMax())) .put("min", sanitizeDouble(gauge.getMin())) .put("rate", sanitizeDouble((gauge.getCount() * 1000.0) / periodInMillis)); if (gauge.getPercentiles().isPresent()) { for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) { valueFields.put(prefixAndValue.first + "percentile", sanitizeDouble(prefixAndValue.second)); } } jsonTuple.set("values", valueFields); } else { throw new UnsupportedOperationException(tuple.val.getClass().getName()); } if (tuple.dim != null) { Iterator<Map.Entry<String, String>> it = tuple.dim.iterator(); if (it.hasNext() && includeDimensions) { ObjectNode jsonDim = jsonMapper.createObjectNode(); while (it.hasNext()) { Map.Entry<String, String> entry = it.next(); jsonDim.put(entry.getKey(), entry.getValue()); } jsonTuple.set("dimensions", jsonDim); } } ArrayNode values = (ArrayNode) jsonMetric.get("values"); if (values == null) { values = jsonMapper.createArrayNode(); jsonMetric.set("values", values); } values.add(jsonTuple); } return jsonMetric; } private static List<Tuple> collapseHealthMetrics(MetricSnapshot snapshot) { Tuple requestsPerSecond = new Tuple(NULL_DIMENSIONS, "requestsPerSecond", null); Tuple latencySeconds = new Tuple(NULL_DIMENSIONS, "latencySeconds", null); for (Map.Entry<MetricDimensions, MetricSet> entry : snapshot) { MetricSet metricSet = entry.getValue(); MetricValue val = 
metricSet.get("serverTotalSuccessfulResponseLatency"); if (val instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric)val; latencySeconds.add(GaugeMetric.newInstance(gauge.getLast() / 1000, gauge.getMax() / 1000, gauge.getMin() / 1000, gauge.getSum() / 1000, gauge.getCount())); } requestsPerSecond.add(metricSet.get("serverNumSuccessfulResponses")); } List<Tuple> lst = new ArrayList<>(); if (requestsPerSecond.val != null) { lst.add(requestsPerSecond); } if (latencySeconds.val != null) { lst.add(latencySeconds); } return lst; } /** Produces a flat list of metric entries from a snapshot (which organizes metrics by dimensions) */ static List<Tuple> flattenAllMetrics(MetricSnapshot snapshot) { List<Tuple> metrics = new ArrayList<>(); for (Map.Entry<MetricDimensions, MetricSet> snapshotEntry : snapshot) { for (Map.Entry<String, MetricValue> metricSetEntry : snapshotEntry.getValue()) { metrics.add(new Tuple(snapshotEntry.getKey(), metricSetEntry.getKey(), metricSetEntry.getValue())); } } return metrics; } private static byte[] toPrettyString(JsonNode resources) throws JsonProcessingException { return jsonMapper.writerWithDefaultPrettyPrinter() .writeValueAsString(resources) .getBytes(); } static class Tuple { final MetricDimensions dim; final String key; MetricValue val; Tuple(MetricDimensions dim, String key, MetricValue val) { this.dim = dim; this.key = key; this.val = val; } void add(MetricValue val) { if (val == null) { return; } if (this.val == null) { this.val = val; } else { this.val.add(val); } } } }
Yes, added a better comment in https://github.com/vespa-engine/vespa/pull/24354
private static List<Tuple> collapseMetrics(MetricSnapshot snapshot, String consumer) { return switch (consumer) { case HEALTH_PATH -> collapseHealthMetrics(snapshot); case "all", METRICS_PATH -> flattenAllMetrics(snapshot); default -> throw new IllegalArgumentException("Unknown consumer '" + consumer + "'."); }; }
case HEALTH_PATH -> collapseHealthMetrics(snapshot);
private static List<Tuple> collapseMetrics(MetricSnapshot snapshot, String consumer) { return switch (consumer) { case HEALTH_PATH -> collapseHealthMetrics(snapshot); case "all", METRICS_PATH -> flattenAllMetrics(snapshot); default -> throw new IllegalArgumentException("Unknown consumer '" + consumer + "'."); }; }
class StateHandler extends AbstractRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); public static final String STATE_API_ROOT = "/state/v1"; private static final String METRICS_PATH = "metrics"; private static final String HISTOGRAMS_PATH = "metrics/histograms"; private static final String CONFIG_GENERATION_PATH = "config"; private static final String HEALTH_PATH = "health"; private static final String VERSION_PATH = "version"; private final static MetricDimensions NULL_DIMENSIONS = StateMetricContext.newInstance(null); private final StateMonitor monitor; private final Timer timer; private final byte[] config; private final SnapshotProvider snapshotProvider; @Inject public StateHandler(StateMonitor monitor, Timer timer, ApplicationMetadataConfig config, ComponentRegistry<SnapshotProvider> snapshotProviders) { this.monitor = monitor; this.timer = timer; this.config = buildConfigOutput(config); snapshotProvider = getSnapshotProviderOrThrow(snapshotProviders); } static SnapshotProvider getSnapshotProviderOrThrow(ComponentRegistry<SnapshotProvider> preprocessors) { List<SnapshotProvider> allPreprocessors = preprocessors.allComponents(); if (allPreprocessors.size() > 0) { return allPreprocessors.get(0); } else { throw new IllegalArgumentException("At least one snapshot provider is required."); } } @Override public ContentChannel handleRequest(Request request, ResponseHandler handler) { new ResponseDispatch() { @Override protected Response newResponse() { Response response = new Response(Response.Status.OK); response.headers().add(HttpHeaders.Names.CONTENT_TYPE, resolveContentType(request.getUri())); return response; } @Override protected Iterable<ByteBuffer> responseContent() { return Collections.singleton(buildContent(request.getUri())); } }.dispatch(handler); return null; } private String resolveContentType(URI requestUri) { if (resolvePath(requestUri).equals(HISTOGRAMS_PATH)) { return "text/plain; charset=utf-8"; } else { return 
"application/json"; } } private ByteBuffer buildContent(URI requestUri) { String suffix = resolvePath(requestUri); switch (suffix) { case "": return ByteBuffer.wrap(apiLinks(requestUri)); case CONFIG_GENERATION_PATH: return ByteBuffer.wrap(config); case HISTOGRAMS_PATH: return ByteBuffer.wrap(buildHistogramsOutput()); case HEALTH_PATH: case METRICS_PATH: return ByteBuffer.wrap(buildMetricOutput(suffix)); case VERSION_PATH: return ByteBuffer.wrap(buildVersionOutput()); default: return ByteBuffer.wrap(buildMetricOutput(suffix)); } } private byte[] apiLinks(URI requestUri) { try { int port = requestUri.getPort(); String host = requestUri.getHost(); StringBuilder base = new StringBuilder("http: base.append(host); if (port != -1) { base.append(":").append(port); } base.append(STATE_API_ROOT); String uriBase = base.toString(); ArrayNode linkList = jsonMapper.createArrayNode(); for (String api : new String[] {METRICS_PATH, CONFIG_GENERATION_PATH, HEALTH_PATH, VERSION_PATH}) { ObjectNode resource = jsonMapper.createObjectNode(); resource.put("url", uriBase + "/" + api); linkList.add(resource); } JsonNode resources = jsonMapper.createObjectNode().set("resources", linkList); return toPrettyString(resources); } catch (JsonProcessingException e) { throw new RuntimeException("Bad JSON construction", e); } } private static String resolvePath(URI uri) { String path = uri.getPath(); if (path.endsWith("/")) { path = path.substring(0, path.length() - 1); } if (path.startsWith(STATE_API_ROOT)) { path = path.substring(STATE_API_ROOT.length()); } if (path.startsWith("/")) { path = path.substring(1); } return path; } private static byte[] buildConfigOutput(ApplicationMetadataConfig config) { try { return toPrettyString( jsonMapper.createObjectNode() .set(CONFIG_GENERATION_PATH, jsonMapper.createObjectNode() .put("generation", config.generation()) .set("container", jsonMapper.createObjectNode() .put("generation", config.generation())))); } catch (JsonProcessingException e) { throw new 
RuntimeException("Bad JSON construction", e); } } private static byte[] buildVersionOutput() { try { return toPrettyString( jsonMapper.createObjectNode() .put("version", Vtag.currentVersion.toString())); } catch (JsonProcessingException e) { throw new RuntimeException("Bad JSON construction", e); } } private byte[] buildMetricOutput(String consumer) { try { return toPrettyString(buildJsonForConsumer(consumer)); } catch (JsonProcessingException e) { throw new RuntimeException("Bad JSON construction", e); } } private byte[] buildHistogramsOutput() { ByteArrayOutputStream baos = new ByteArrayOutputStream(); if (snapshotProvider != null) { snapshotProvider.histogram(new PrintStream(baos)); } return baos.toByteArray(); } private ObjectNode buildJsonForConsumer(String consumer) { ObjectNode ret = jsonMapper.createObjectNode(); ret.put("time", timer.currentTimeMillis()); ret.set("status", jsonMapper.createObjectNode().put("code", getStatus().name())); ret.set(METRICS_PATH, buildJsonForSnapshot(consumer, getSnapshot())); return ret; } private MetricSnapshot getSnapshot() { return snapshotProvider.latestSnapshot(); } private StateMonitor.Status getStatus() { return monitor.status(); } private ObjectNode buildJsonForSnapshot(String consumer, MetricSnapshot metricSnapshot) { if (metricSnapshot == null) { return jsonMapper.createObjectNode(); } ObjectNode jsonMetric = jsonMapper.createObjectNode(); jsonMetric.set("snapshot", jsonMapper.createObjectNode() .put("from", sanitizeDouble(metricSnapshot.getFromTime(TimeUnit.MILLISECONDS) / 1000.0)) .put("to", sanitizeDouble(metricSnapshot.getToTime(TimeUnit.MILLISECONDS) / 1000.0))); boolean includeDimensions = !consumer.equals(HEALTH_PATH); long periodInMillis = metricSnapshot.getToTime(TimeUnit.MILLISECONDS) - metricSnapshot.getFromTime(TimeUnit.MILLISECONDS); for (Tuple tuple : collapseMetrics(metricSnapshot, consumer)) { ObjectNode jsonTuple = jsonMapper.createObjectNode(); jsonTuple.put("name", tuple.key); if (tuple.val 
instanceof CountMetric) { CountMetric count = (CountMetric)tuple.val; jsonTuple.set("values", jsonMapper.createObjectNode() .put("count", count.getCount()) .put("rate", sanitizeDouble(count.getCount() * 1000.0) / periodInMillis)); } else if (tuple.val instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric) tuple.val; ObjectNode valueFields = jsonMapper.createObjectNode(); valueFields.put("average", sanitizeDouble(gauge.getAverage())) .put("sum", sanitizeDouble(gauge.getSum())) .put("count", gauge.getCount()) .put("last", sanitizeDouble(gauge.getLast())) .put("max", sanitizeDouble(gauge.getMax())) .put("min", sanitizeDouble(gauge.getMin())) .put("rate", sanitizeDouble((gauge.getCount() * 1000.0) / periodInMillis)); if (gauge.getPercentiles().isPresent()) { for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) { valueFields.put(prefixAndValue.first + "percentile", sanitizeDouble(prefixAndValue.second)); } } jsonTuple.set("values", valueFields); } else { throw new UnsupportedOperationException(tuple.val.getClass().getName()); } if (tuple.dim != null) { Iterator<Map.Entry<String, String>> it = tuple.dim.iterator(); if (it.hasNext() && includeDimensions) { ObjectNode jsonDim = jsonMapper.createObjectNode(); while (it.hasNext()) { Map.Entry<String, String> entry = it.next(); jsonDim.put(entry.getKey(), entry.getValue()); } jsonTuple.set("dimensions", jsonDim); } } ArrayNode values = (ArrayNode) jsonMetric.get("values"); if (values == null) { values = jsonMapper.createArrayNode(); jsonMetric.set("values", values); } values.add(jsonTuple); } return jsonMetric; } private static List<Tuple> collapseHealthMetrics(MetricSnapshot snapshot) { Tuple requestsPerSecond = new Tuple(NULL_DIMENSIONS, "requestsPerSecond", null); Tuple latencySeconds = new Tuple(NULL_DIMENSIONS, "latencySeconds", null); for (Map.Entry<MetricDimensions, MetricSet> entry : snapshot) { MetricSet metricSet = entry.getValue(); MetricValue val = 
metricSet.get("serverTotalSuccessfulResponseLatency"); if (val instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric)val; latencySeconds.add(GaugeMetric.newInstance(gauge.getLast() / 1000, gauge.getMax() / 1000, gauge.getMin() / 1000, gauge.getSum() / 1000, gauge.getCount())); } requestsPerSecond.add(metricSet.get("serverNumSuccessfulResponses")); } List<Tuple> lst = new ArrayList<>(); if (requestsPerSecond.val != null) { lst.add(requestsPerSecond); } if (latencySeconds.val != null) { lst.add(latencySeconds); } return lst; } /** Produces a flat list of metric entries from a snapshot (which organizes metrics by dimensions) */ static List<Tuple> flattenAllMetrics(MetricSnapshot snapshot) { List<Tuple> metrics = new ArrayList<>(); for (Map.Entry<MetricDimensions, MetricSet> snapshotEntry : snapshot) { for (Map.Entry<String, MetricValue> metricSetEntry : snapshotEntry.getValue()) { metrics.add(new Tuple(snapshotEntry.getKey(), metricSetEntry.getKey(), metricSetEntry.getValue())); } } return metrics; } private static byte[] toPrettyString(JsonNode resources) throws JsonProcessingException { return jsonMapper.writerWithDefaultPrettyPrinter() .writeValueAsString(resources) .getBytes(); } static class Tuple { final MetricDimensions dim; final String key; MetricValue val; Tuple(MetricDimensions dim, String key, MetricValue val) { this.dim = dim; this.key = key; this.val = val; } void add(MetricValue val) { if (val == null) { return; } if (this.val == null) { this.val = val; } else { this.val.add(val); } } } }
class StateHandler extends AbstractRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); public static final String STATE_API_ROOT = "/state/v1"; private static final String METRICS_PATH = "metrics"; private static final String HISTOGRAMS_PATH = "metrics/histograms"; private static final String CONFIG_GENERATION_PATH = "config"; private static final String HEALTH_PATH = "health"; private static final String VERSION_PATH = "version"; private final static MetricDimensions NULL_DIMENSIONS = StateMetricContext.newInstance(null); private final StateMonitor monitor; private final Timer timer; private final byte[] config; private final SnapshotProvider snapshotProvider; @Inject public StateHandler(StateMonitor monitor, Timer timer, ApplicationMetadataConfig config, ComponentRegistry<SnapshotProvider> snapshotProviders) { this.monitor = monitor; this.timer = timer; this.config = buildConfigOutput(config); snapshotProvider = getSnapshotProviderOrThrow(snapshotProviders); } static SnapshotProvider getSnapshotProviderOrThrow(ComponentRegistry<SnapshotProvider> preprocessors) { List<SnapshotProvider> allPreprocessors = preprocessors.allComponents(); if (allPreprocessors.size() > 0) { return allPreprocessors.get(0); } else { throw new IllegalArgumentException("At least one snapshot provider is required."); } } @Override public ContentChannel handleRequest(Request request, ResponseHandler handler) { new ResponseDispatch() { @Override protected Response newResponse() { Response response = new Response(Response.Status.OK); response.headers().add(HttpHeaders.Names.CONTENT_TYPE, resolveContentType(request.getUri())); return response; } @Override protected Iterable<ByteBuffer> responseContent() { return Collections.singleton(buildContent(request.getUri())); } }.dispatch(handler); return null; } private String resolveContentType(URI requestUri) { if (resolvePath(requestUri).equals(HISTOGRAMS_PATH)) { return "text/plain; charset=utf-8"; } else { return 
"application/json"; } } private ByteBuffer buildContent(URI requestUri) { String suffix = resolvePath(requestUri); switch (suffix) { case "": return ByteBuffer.wrap(apiLinks(requestUri)); case CONFIG_GENERATION_PATH: return ByteBuffer.wrap(config); case HISTOGRAMS_PATH: return ByteBuffer.wrap(buildHistogramsOutput()); case HEALTH_PATH: case METRICS_PATH: return ByteBuffer.wrap(buildMetricOutput(suffix)); case VERSION_PATH: return ByteBuffer.wrap(buildVersionOutput()); default: return ByteBuffer.wrap(buildMetricOutput(suffix)); } } private byte[] apiLinks(URI requestUri) { try { int port = requestUri.getPort(); String host = requestUri.getHost(); StringBuilder base = new StringBuilder("http: base.append(host); if (port != -1) { base.append(":").append(port); } base.append(STATE_API_ROOT); String uriBase = base.toString(); ArrayNode linkList = jsonMapper.createArrayNode(); for (String api : new String[] {METRICS_PATH, CONFIG_GENERATION_PATH, HEALTH_PATH, VERSION_PATH}) { ObjectNode resource = jsonMapper.createObjectNode(); resource.put("url", uriBase + "/" + api); linkList.add(resource); } JsonNode resources = jsonMapper.createObjectNode().set("resources", linkList); return toPrettyString(resources); } catch (JsonProcessingException e) { throw new RuntimeException("Bad JSON construction", e); } } private static String resolvePath(URI uri) { String path = uri.getPath(); if (path.endsWith("/")) { path = path.substring(0, path.length() - 1); } if (path.startsWith(STATE_API_ROOT)) { path = path.substring(STATE_API_ROOT.length()); } if (path.startsWith("/")) { path = path.substring(1); } return path; } private static byte[] buildConfigOutput(ApplicationMetadataConfig config) { try { return toPrettyString( jsonMapper.createObjectNode() .set(CONFIG_GENERATION_PATH, jsonMapper.createObjectNode() .put("generation", config.generation()) .set("container", jsonMapper.createObjectNode() .put("generation", config.generation())))); } catch (JsonProcessingException e) { throw new 
RuntimeException("Bad JSON construction", e); } } private static byte[] buildVersionOutput() { try { return toPrettyString( jsonMapper.createObjectNode() .put("version", Vtag.currentVersion.toString())); } catch (JsonProcessingException e) { throw new RuntimeException("Bad JSON construction", e); } } private byte[] buildMetricOutput(String consumer) { try { return toPrettyString(buildJsonForConsumer(consumer)); } catch (JsonProcessingException e) { throw new RuntimeException("Bad JSON construction", e); } } private byte[] buildHistogramsOutput() { ByteArrayOutputStream baos = new ByteArrayOutputStream(); if (snapshotProvider != null) { snapshotProvider.histogram(new PrintStream(baos)); } return baos.toByteArray(); } private ObjectNode buildJsonForConsumer(String consumer) { ObjectNode ret = jsonMapper.createObjectNode(); ret.put("time", timer.currentTimeMillis()); ret.set("status", jsonMapper.createObjectNode().put("code", getStatus().name())); ret.set(METRICS_PATH, buildJsonForSnapshot(consumer, getSnapshot())); return ret; } private MetricSnapshot getSnapshot() { return snapshotProvider.latestSnapshot(); } private StateMonitor.Status getStatus() { return monitor.status(); } private ObjectNode buildJsonForSnapshot(String consumer, MetricSnapshot metricSnapshot) { if (metricSnapshot == null) { return jsonMapper.createObjectNode(); } ObjectNode jsonMetric = jsonMapper.createObjectNode(); jsonMetric.set("snapshot", jsonMapper.createObjectNode() .put("from", sanitizeDouble(metricSnapshot.getFromTime(TimeUnit.MILLISECONDS) / 1000.0)) .put("to", sanitizeDouble(metricSnapshot.getToTime(TimeUnit.MILLISECONDS) / 1000.0))); boolean includeDimensions = !consumer.equals(HEALTH_PATH); long periodInMillis = metricSnapshot.getToTime(TimeUnit.MILLISECONDS) - metricSnapshot.getFromTime(TimeUnit.MILLISECONDS); for (Tuple tuple : collapseMetrics(metricSnapshot, consumer)) { ObjectNode jsonTuple = jsonMapper.createObjectNode(); jsonTuple.put("name", tuple.key); if (tuple.val 
instanceof CountMetric) { CountMetric count = (CountMetric)tuple.val; jsonTuple.set("values", jsonMapper.createObjectNode() .put("count", count.getCount()) .put("rate", sanitizeDouble(count.getCount() * 1000.0) / periodInMillis)); } else if (tuple.val instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric) tuple.val; ObjectNode valueFields = jsonMapper.createObjectNode(); valueFields.put("average", sanitizeDouble(gauge.getAverage())) .put("sum", sanitizeDouble(gauge.getSum())) .put("count", gauge.getCount()) .put("last", sanitizeDouble(gauge.getLast())) .put("max", sanitizeDouble(gauge.getMax())) .put("min", sanitizeDouble(gauge.getMin())) .put("rate", sanitizeDouble((gauge.getCount() * 1000.0) / periodInMillis)); if (gauge.getPercentiles().isPresent()) { for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) { valueFields.put(prefixAndValue.first + "percentile", sanitizeDouble(prefixAndValue.second)); } } jsonTuple.set("values", valueFields); } else { throw new UnsupportedOperationException(tuple.val.getClass().getName()); } if (tuple.dim != null) { Iterator<Map.Entry<String, String>> it = tuple.dim.iterator(); if (it.hasNext() && includeDimensions) { ObjectNode jsonDim = jsonMapper.createObjectNode(); while (it.hasNext()) { Map.Entry<String, String> entry = it.next(); jsonDim.put(entry.getKey(), entry.getValue()); } jsonTuple.set("dimensions", jsonDim); } } ArrayNode values = (ArrayNode) jsonMetric.get("values"); if (values == null) { values = jsonMapper.createArrayNode(); jsonMetric.set("values", values); } values.add(jsonTuple); } return jsonMetric; } private static List<Tuple> collapseHealthMetrics(MetricSnapshot snapshot) { Tuple requestsPerSecond = new Tuple(NULL_DIMENSIONS, "requestsPerSecond", null); Tuple latencySeconds = new Tuple(NULL_DIMENSIONS, "latencySeconds", null); for (Map.Entry<MetricDimensions, MetricSet> entry : snapshot) { MetricSet metricSet = entry.getValue(); MetricValue val = 
metricSet.get("serverTotalSuccessfulResponseLatency"); if (val instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric)val; latencySeconds.add(GaugeMetric.newInstance(gauge.getLast() / 1000, gauge.getMax() / 1000, gauge.getMin() / 1000, gauge.getSum() / 1000, gauge.getCount())); } requestsPerSecond.add(metricSet.get("serverNumSuccessfulResponses")); } List<Tuple> lst = new ArrayList<>(); if (requestsPerSecond.val != null) { lst.add(requestsPerSecond); } if (latencySeconds.val != null) { lst.add(latencySeconds); } return lst; } /** Produces a flat list of metric entries from a snapshot (which organizes metrics by dimensions) */ static List<Tuple> flattenAllMetrics(MetricSnapshot snapshot) { List<Tuple> metrics = new ArrayList<>(); for (Map.Entry<MetricDimensions, MetricSet> snapshotEntry : snapshot) { for (Map.Entry<String, MetricValue> metricSetEntry : snapshotEntry.getValue()) { metrics.add(new Tuple(snapshotEntry.getKey(), metricSetEntry.getKey(), metricSetEntry.getValue())); } } return metrics; } private static byte[] toPrettyString(JsonNode resources) throws JsonProcessingException { return jsonMapper.writerWithDefaultPrettyPrinter() .writeValueAsString(resources) .getBytes(); } static class Tuple { final MetricDimensions dim; final String key; MetricValue val; Tuple(MetricDimensions dim, String key, MetricValue val) { this.dim = dim; this.key = key; this.val = val; } void add(MetricValue val) { if (val == null) { return; } if (this.val == null) { this.val = val; } else { this.val.add(val); } } } }
Throw UncheckedIOException?
private void createFiles() { IntStream.of(0,1,2,3).forEach(i -> { try { writeFileAndSetLastAccessedTime(cachedFileReferences, "fileReference" + i); writeFileAndSetLastAccessedTime(cachedDownloads, "download" + i); clock.advance(Duration.ofMinutes(1)); } catch (IOException e) { throw new RuntimeException(e); } }); }
throw new RuntimeException(e);
private void createFiles() { IntStream.of(0,1,2,3).forEach(i -> { try { writeFileAndSetLastAccessedTime(cachedFileReferences, "fileReference" + i); writeFileAndSetLastAccessedTime(cachedDownloads, "download" + i); clock.advance(Duration.ofMinutes(1)); } catch (IOException e) { throw new UncheckedIOException(e); } }); }
class CachedFilesMaintainerTest { private static final int numberToAlwaysKeep = 2; private File cachedFileReferences; private File cachedDownloads; private CachedFilesMaintainer cachedFilesMaintainer; private final ManualClock clock = new ManualClock(); @TempDir public File tempFolder; @BeforeEach public void setup() throws IOException { cachedFileReferences = newFolder(tempFolder, "cachedFileReferences"); cachedDownloads = newFolder(tempFolder, "cachedDownloads"); cachedFilesMaintainer = new CachedFilesMaintainer(cachedFileReferences, cachedDownloads, Duration.ofMinutes(2), clock, numberToAlwaysKeep); } @Test void require_old_files_to_be_deleted() { runMaintainerAndAssertFiles(0, 0); clock.advance(Duration.ofSeconds(55)); createFiles(); runMaintainerAndAssertFiles(4, 4); clock.advance(Duration.ofMinutes(1)); runMaintainerAndAssertFiles(3, 3); clock.advance(Duration.ofMinutes(100)); runMaintainerAndAssertFiles(numberToAlwaysKeep, numberToAlwaysKeep); } private void runMaintainerAndAssertFiles(int fileReferenceCount, int downloadCount) { cachedFilesMaintainer.run(); File[] fileReferences = cachedFileReferences.listFiles(); assertNotNull(fileReferences); assertEquals(fileReferenceCount, fileReferences.length); File[] downloads = cachedDownloads.listFiles(); assertNotNull(downloads); assertEquals(downloadCount, downloads.length); } private void writeFileAndSetLastAccessedTime(File directory, String filename) throws IOException { File file = new File(directory, filename); IOUtils.writeFile(file, filename, false); Files.setAttribute(file.toPath(), "lastAccessTime", FileTime.from(clock.instant())); } private static File newFolder(File root, String... subDirs) throws IOException { String subFolder = String.join("/", subDirs); File result = new File(root, subFolder); if (!result.mkdirs()) { throw new IOException("Couldn't create folders " + root); } return result; } }
class CachedFilesMaintainerTest { private static final int numberToAlwaysKeep = 2; private File cachedFileReferences; private File cachedDownloads; private CachedFilesMaintainer cachedFilesMaintainer; private final ManualClock clock = new ManualClock(); @TempDir public File tempFolder; @BeforeEach public void setup() throws IOException { cachedFileReferences = newFolder(tempFolder, "cachedFileReferences"); cachedDownloads = newFolder(tempFolder, "cachedDownloads"); cachedFilesMaintainer = new CachedFilesMaintainer(cachedFileReferences, cachedDownloads, Duration.ofMinutes(2), clock, numberToAlwaysKeep); } @Test void require_old_files_to_be_deleted() { runMaintainerAndAssertFiles(0, 0); clock.advance(Duration.ofSeconds(55)); createFiles(); runMaintainerAndAssertFiles(4, 4); clock.advance(Duration.ofMinutes(1)); runMaintainerAndAssertFiles(3, 3); clock.advance(Duration.ofMinutes(100)); runMaintainerAndAssertFiles(numberToAlwaysKeep, numberToAlwaysKeep); } private void runMaintainerAndAssertFiles(int fileReferenceCount, int downloadCount) { cachedFilesMaintainer.run(); File[] fileReferences = cachedFileReferences.listFiles(); assertNotNull(fileReferences); assertEquals(fileReferenceCount, fileReferences.length); File[] downloads = cachedDownloads.listFiles(); assertNotNull(downloads); assertEquals(downloadCount, downloads.length); } private void writeFileAndSetLastAccessedTime(File directory, String filename) throws IOException { File file = new File(directory, filename); IOUtils.writeFile(file, filename, false); Files.setAttribute(file.toPath(), "lastAccessTime", FileTime.from(clock.instant())); } private static File newFolder(File root, String... subDirs) throws IOException { String subFolder = String.join("/", subDirs); File result = new File(root, subFolder); if (!result.mkdirs()) { throw new IOException("Couldn't create folders " + root); } return result; } }
Yes, good point, will do
private void createFiles() { IntStream.of(0,1,2,3).forEach(i -> { try { writeFileAndSetLastAccessedTime(cachedFileReferences, "fileReference" + i); writeFileAndSetLastAccessedTime(cachedDownloads, "download" + i); clock.advance(Duration.ofMinutes(1)); } catch (IOException e) { throw new RuntimeException(e); } }); }
throw new RuntimeException(e);
private void createFiles() { IntStream.of(0,1,2,3).forEach(i -> { try { writeFileAndSetLastAccessedTime(cachedFileReferences, "fileReference" + i); writeFileAndSetLastAccessedTime(cachedDownloads, "download" + i); clock.advance(Duration.ofMinutes(1)); } catch (IOException e) { throw new UncheckedIOException(e); } }); }
class CachedFilesMaintainerTest { private static final int numberToAlwaysKeep = 2; private File cachedFileReferences; private File cachedDownloads; private CachedFilesMaintainer cachedFilesMaintainer; private final ManualClock clock = new ManualClock(); @TempDir public File tempFolder; @BeforeEach public void setup() throws IOException { cachedFileReferences = newFolder(tempFolder, "cachedFileReferences"); cachedDownloads = newFolder(tempFolder, "cachedDownloads"); cachedFilesMaintainer = new CachedFilesMaintainer(cachedFileReferences, cachedDownloads, Duration.ofMinutes(2), clock, numberToAlwaysKeep); } @Test void require_old_files_to_be_deleted() { runMaintainerAndAssertFiles(0, 0); clock.advance(Duration.ofSeconds(55)); createFiles(); runMaintainerAndAssertFiles(4, 4); clock.advance(Duration.ofMinutes(1)); runMaintainerAndAssertFiles(3, 3); clock.advance(Duration.ofMinutes(100)); runMaintainerAndAssertFiles(numberToAlwaysKeep, numberToAlwaysKeep); } private void runMaintainerAndAssertFiles(int fileReferenceCount, int downloadCount) { cachedFilesMaintainer.run(); File[] fileReferences = cachedFileReferences.listFiles(); assertNotNull(fileReferences); assertEquals(fileReferenceCount, fileReferences.length); File[] downloads = cachedDownloads.listFiles(); assertNotNull(downloads); assertEquals(downloadCount, downloads.length); } private void writeFileAndSetLastAccessedTime(File directory, String filename) throws IOException { File file = new File(directory, filename); IOUtils.writeFile(file, filename, false); Files.setAttribute(file.toPath(), "lastAccessTime", FileTime.from(clock.instant())); } private static File newFolder(File root, String... subDirs) throws IOException { String subFolder = String.join("/", subDirs); File result = new File(root, subFolder); if (!result.mkdirs()) { throw new IOException("Couldn't create folders " + root); } return result; } }
class CachedFilesMaintainerTest { private static final int numberToAlwaysKeep = 2; private File cachedFileReferences; private File cachedDownloads; private CachedFilesMaintainer cachedFilesMaintainer; private final ManualClock clock = new ManualClock(); @TempDir public File tempFolder; @BeforeEach public void setup() throws IOException { cachedFileReferences = newFolder(tempFolder, "cachedFileReferences"); cachedDownloads = newFolder(tempFolder, "cachedDownloads"); cachedFilesMaintainer = new CachedFilesMaintainer(cachedFileReferences, cachedDownloads, Duration.ofMinutes(2), clock, numberToAlwaysKeep); } @Test void require_old_files_to_be_deleted() { runMaintainerAndAssertFiles(0, 0); clock.advance(Duration.ofSeconds(55)); createFiles(); runMaintainerAndAssertFiles(4, 4); clock.advance(Duration.ofMinutes(1)); runMaintainerAndAssertFiles(3, 3); clock.advance(Duration.ofMinutes(100)); runMaintainerAndAssertFiles(numberToAlwaysKeep, numberToAlwaysKeep); } private void runMaintainerAndAssertFiles(int fileReferenceCount, int downloadCount) { cachedFilesMaintainer.run(); File[] fileReferences = cachedFileReferences.listFiles(); assertNotNull(fileReferences); assertEquals(fileReferenceCount, fileReferences.length); File[] downloads = cachedDownloads.listFiles(); assertNotNull(downloads); assertEquals(downloadCount, downloads.length); } private void writeFileAndSetLastAccessedTime(File directory, String filename) throws IOException { File file = new File(directory, filename); IOUtils.writeFile(file, filename, false); Files.setAttribute(file.toPath(), "lastAccessTime", FileTime.from(clock.instant())); } private static File newFolder(File root, String... subDirs) throws IOException { String subFolder = String.join("/", subDirs); File result = new File(root, subFolder); if (!result.mkdirs()) { throw new IOException("Couldn't create folders " + root); } return result; } }
🙈 🙊 🙉
public void testSingleSingleton() { try (Curator wrapped = new MockCurator()) { Phaser stunning = new Phaser(1); ManualClock clock = new ManualClock() { @Override public Instant instant() { stunning.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); return super.instant(); }; }; MockMetric metric = new MockMetric(); CuratorWrapper curator = new CuratorWrapper(wrapped, clock, Duration.ofMillis(100), metric); Singleton singleton = new Singleton(curator); assertTrue(singleton.isActive); assertTrue(wrapped.exists(lockPath)); assertTrue(curator.isActive(singleton.id())); singleton.shutdown(); assertFalse(singleton.isActive); Phaser mark2 = new Phaser(2); new Thread(() -> { mark2.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); }).start(); singleton = new Singleton(curator) { @Override public void activate() { super.activate(); stunning.register(); mark2.arrive(); } }; assertTrue(singleton.isActive); stunning.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); clock.advance(Curator.ZK_SESSION_TIMEOUT); singleton.phaser.register(); stunning.forceTermination(); singleton.phaser.arriveAndAwaitAdvance(); assertFalse(singleton.isActive); verifyMetrics(Map.of("activation.count", 2.0, "activation.millis", 0.0, "deactivation.count", 2.0, "deactivation.millis", 0.0, "is_active", 0.0), metric); singleton.phaser.arriveAndAwaitAdvance(); assertTrue(singleton.isActive); verifyMetrics(Map.of("activation.count", 3.0, "activation.millis", 0.0, "deactivation.count", 2.0, "deactivation.millis", 0.0, "is_active", 1.0, "has_lease", 1.0), metric); curator.deconstruct(); singleton.phaser.arriveAndAwaitAdvance(); assertFalse(singleton.isActive); verifyMetrics(Map.of("activation.count", 3.0, "activation.millis", 0.0, "deactivation.count", 3.0, "deactivation.millis", 0.0), metric); } }
@Override public void activate() {
public void testSingleSingleton() { try (Curator wrapped = new MockCurator()) { Phaser stunning = new Phaser(1); ManualClock clock = new ManualClock() { @Override public Instant instant() { stunning.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); return super.instant(); }; }; MockMetric metric = new MockMetric(); CuratorWrapper curator = new CuratorWrapper(wrapped, clock, Duration.ofMillis(100), metric); Singleton singleton = new Singleton(curator); assertTrue(singleton.isActive); assertTrue(wrapped.exists(lockPath)); stunning.register(); assertTrue(curator.isActive(singleton.id())); stunning.arriveAndDeregister(); singleton.shutdown(); assertFalse(singleton.isActive); Phaser mark2 = new Phaser(2); new Thread(() -> { mark2.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); }).start(); singleton = new Singleton(curator) { @Override public void activate() { super.activate(); stunning.register(); mark2.arrive(); } }; assertTrue(singleton.isActive); stunning.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); clock.advance(Curator.ZK_SESSION_TIMEOUT); singleton.phaser.register(); stunning.forceTermination(); singleton.phaser.arriveAndAwaitAdvance(); assertFalse(singleton.isActive); verifyMetrics(Map.of("activation.count", 2.0, "activation.millis", 0.0, "deactivation.count", 2.0, "deactivation.millis", 0.0), metric); singleton.phaser.awaitAdvance(singleton.phaser.arriveAndDeregister()); assertTrue(singleton.isActive); verifyMetrics(Map.of("activation.count", 3.0, "activation.millis", 0.0, "deactivation.count", 2.0, "deactivation.millis", 0.0, "has_lease", 1.0), metric); curator.deconstruct(); assertFalse(singleton.isActive); verifyMetrics(Map.of("activation.count", 3.0, "activation.millis", 0.0, "deactivation.count", 3.0, "deactivation.millis", 0.0, "is_active", 0.0, "has_lease", 0.0), metric); } }
class CuratorWrapperTest { static final Path lockPath = Path.fromString("/vespa/singleton/v1/singleton/lock"); @Test public void testUserApi() throws Exception { try (Curator wrapped = new MockCurator()) { CuratorWrapper curator = new CuratorWrapper(wrapped, new MockMetric()); Path path = Path.fromString("path"); assertEquals(Optional.empty(), curator.stat(path)); Meta meta = curator.write(path, "data".getBytes(UTF_8)); assertEquals(Optional.of(meta), curator.stat(path)); assertEquals("data", new String(curator.read(path).get().data(), UTF_8)); assertEquals(meta, curator.read(path).get().meta()); assertEquals(Optional.empty(), curator.write(path, new byte[0], 0)); meta = curator.write(path, new byte[0], meta.version()).get(); assertEquals(3, meta.version()); assertEquals(List.of("path"), curator.list(Path.createRoot())); assertFalse(curator.delete(path, 0)); curator.delete(path, 3); assertEquals(List.of(), curator.list(Path.createRoot())); try (AutoCloseable lock = curator.lock(path, Duration.ofSeconds(1))) { assertEquals(List.of("user", "path"), wrapped.getChildren(Path.createRoot())); assertEquals(List.of("path"), wrapped.getChildren(CuratorWrapper.userRoot)); } try (AutoCloseable lock = curator.lock(path, Duration.ofSeconds(1))) { } } } @Test @Test public void testSingletonsInSameContainer() { try (Curator wrapped = new MockCurator()) { MockMetric metric = new MockMetric(); CuratorWrapper curator = new CuratorWrapper(wrapped, metric); Singleton singleton = new Singleton(curator); assertTrue(singleton.isActive); assertTrue(wrapped.exists(lockPath)); assertTrue(curator.isActive(singleton.id())); Singleton newSingleton = new Singleton(curator); assertTrue(newSingleton.isActive); assertFalse(singleton.isActive); Singleton newerSingleton = new Singleton(curator); assertTrue(newerSingleton.isActive); assertFalse(newSingleton.isActive); assertFalse(singleton.isActive); singleton.shutdown(); assertTrue(newerSingleton.isActive); assertFalse(newSingleton.isActive); 
assertFalse(singleton.isActive); newerSingleton.shutdown(); assertFalse(newerSingleton.isActive); assertTrue(newSingleton.isActive); assertFalse(singleton.isActive); verifyMetrics(Map.of("activation.count", 4.0, "activation.millis", 0.0, "deactivation.count", 3.0, "deactivation.millis", 0.0, "is_active", 1.0, "has_lease", 1.0), metric); Phaser stunning = new Phaser(2); AtomicReference<String> thrownMessage = new AtomicReference<>(); new Thread(() -> { RuntimeException e = assertThrows(RuntimeException.class, () -> new Singleton(curator) { @Override public void activate() { throw new RuntimeException(); } @Override public void deactivate() { stunning.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); throw new RuntimeException(); } @Override public String toString() { return "failing singleton"; } }); stunning.arriveAndAwaitAdvance(); thrownMessage.set(e.getMessage()); }).start(); stunning.arriveAndAwaitAdvance(); assertFalse(newSingleton.isActive); assertTrue(curator.isActive(newSingleton.id())); verifyMetrics(Map.of("activation.count", 5.0, "activation.millis", 0.0, "activation.failure.count", 1.0, "deactivation.count", 5.0, "deactivation.millis", 0.0, "is_active", 0.0, "has_lease", 1.0), metric); stunning.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); assertTrue(newSingleton.isActive); assertEquals("failed to register failing singleton", thrownMessage.get()); verifyMetrics(Map.of("activation.count", 6.0, "activation.millis", 0.0, "activation.failure.count", 1.0, "deactivation.count", 5.0, "deactivation.millis", 0.0, "is_active", 1.0, "has_lease", 1.0), metric); newSingleton.shutdown(); verifyMetrics(Map.of("activation.count", 6.0, "activation.millis", 0.0, "activation.failure.count", 1.0, "deactivation.count", 6.0, "deactivation.millis", 0.0, "is_active", 0.0, "has_lease", 0.0), metric); curator.deconstruct(); } } @Test public void testSingletonsInDifferentContainers() { try (MockCurator wrapped = new MockCurator()) { MockMetric metric = new 
MockMetric(); CuratorWrapper curator = new CuratorWrapper(wrapped, Clock.systemUTC(), Duration.ofMillis(100), metric); Singleton singleton; try (Lock lock = wrapped.lock(lockPath, Duration.ofSeconds(1))) { singleton = new Singleton(curator); assertFalse(singleton.isActive); assertFalse(curator.isActive(singleton.id())); assertEquals(Map.of(), metric.metrics()); singleton.phaser.register(); } singleton.phaser.arriveAndAwaitAdvance(); assertTrue(curator.isActive(singleton.id())); assertTrue(singleton.isActive); verifyMetrics(Map.of("activation.count", 1.0, "activation.millis", 0.0, "is_active", 1.0, "has_lease", 1.0), metric); Phaser stunning = new Phaser(2); new Thread(() -> { try (Lock lock = wrapped.lock(lockPath, Duration.ofSeconds(2))) { stunning.arriveAndAwaitAdvance(); stunning.arriveAndAwaitAdvance(); } }).start(); ((MockCuratorFramework) wrapped.framework()).connectionStateListeners.listeners.forEach(listener -> listener.stateChanged(null, ConnectionState.LOST)); stunning.arriveAndAwaitAdvance(); singleton.phaser.arriveAndAwaitAdvance(); assertFalse(singleton.isActive); verifyMetrics(Map.of("activation.count", 1.0, "activation.millis", 0.0, "deactivation.count", 1.0, "deactivation.millis", 0.0, "is_active", 0.0, "has_lease", 0.0), metric); stunning.arriveAndAwaitAdvance(); singleton.phaser.arriveAndAwaitAdvance(); assertTrue(singleton.isActive); verifyMetrics(Map.of("activation.count", 2.0, "activation.millis", 0.0, "deactivation.count", 1.0, "deactivation.millis", 0.0, "is_active", 1.0, "has_lease", 1.0), metric); singleton.phaser.arriveAndDeregister(); singleton.shutdown(); assertFalse(singleton.isActive); verifyMetrics(Map.of("activation.count", 2.0, "activation.millis", 0.0, "deactivation.count", 2.0, "deactivation.millis", 0.0, "is_active", 0.0, "has_lease", 0.0), metric); curator.deconstruct(); } } static class Singleton extends AbstractSingletonWorker { Singleton(VespaCurator curator) { register(curator, Duration.ofSeconds(2)); } boolean isActive; 
Phaser phaser = new Phaser(1); @Override public String id() { return "singleton"; } @Override public void activate() { isActive = true; phaser.arriveAndAwaitAdvance(); } @Override public void deactivate() { isActive = false; phaser.arriveAndAwaitAdvance(); } public void shutdown() { unregister(Duration.ofSeconds(2)); } } static void verifyMetrics(Map<String, Double> expected, MockMetric metrics) { expected.forEach((metric, value) -> assertEquals(metric, value, metrics.metrics().get("singleton." + metric).get(Map.of("singletonId", "singleton")))); } }
/**
 * Tests CuratorWrapper's user-facing ZooKeeper API and the singleton activation protocol,
 * both when competing singletons live in the same container and in different containers.
 *
 * Fix: testSingletonsInSameContainer was annotated {@code @Test @Test}; the annotation is
 * not repeatable, so the duplicate did not compile and has been removed.
 */
class CuratorWrapperTest {

    static final Path lockPath = Path.fromString("/vespa/singleton/v1/singleton/lock");

    @Test
    public void testUserApi() throws Exception {
        try (Curator wrapped = new MockCurator()) {
            CuratorWrapper curator = new CuratorWrapper(wrapped, new MockMetric());

            Path path = Path.fromString("path");
            assertEquals(Optional.empty(), curator.stat(path));

            Meta meta = curator.write(path, "data".getBytes(UTF_8));
            assertEquals(Optional.of(meta), curator.stat(path));

            assertEquals("data", new String(curator.read(path).get().data(), UTF_8));
            assertEquals(meta, curator.read(path).get().meta());

            // Write with a stale version fails; write with the current version bumps it.
            assertEquals(Optional.empty(), curator.write(path, new byte[0], 0));
            meta = curator.write(path, new byte[0], meta.version()).get();
            assertEquals(3, meta.version());

            assertEquals(List.of("path"), curator.list(Path.createRoot()));

            assertFalse(curator.delete(path, 0)); // stale version: no-op
            curator.delete(path, 3);
            assertEquals(List.of(), curator.list(Path.createRoot()));

            // User paths are nested under the wrapper's "user" root in the backing curator.
            try (AutoCloseable lock = curator.lock(path, Duration.ofSeconds(1))) {
                assertEquals(List.of("user", "path"), wrapped.getChildren(Path.createRoot()));
                assertEquals(List.of("path"), wrapped.getChildren(CuratorWrapper.userRoot));
            }
            // Re-acquiring after release succeeds.
            try (AutoCloseable lock = curator.lock(path, Duration.ofSeconds(1))) { }
        }
    }

    @Test
    public void testSingletonsInSameContainer() {
        try (Curator wrapped = new MockCurator()) {
            MockMetric metric = new MockMetric();
            CuratorWrapper curator = new CuratorWrapper(wrapped, new ManualClock(), Duration.ofMillis(100), metric);

            Singleton singleton = new Singleton(curator);
            assertTrue(singleton.isActive);
            assertTrue(wrapped.exists(lockPath));
            assertTrue(curator.isActive(singleton.id()));

            // The most recently registered singleton with the same id takes over.
            Singleton newSingleton = new Singleton(curator);
            assertTrue(newSingleton.isActive);
            assertFalse(singleton.isActive);

            Singleton newerSingleton = new Singleton(curator);
            assertTrue(newerSingleton.isActive);
            assertFalse(newSingleton.isActive);
            assertFalse(singleton.isActive);

            // Shutting down a non-active singleton changes nothing.
            singleton.shutdown();
            assertTrue(newerSingleton.isActive);
            assertFalse(newSingleton.isActive);
            assertFalse(singleton.isActive);

            // Shutting down the active singleton hands activation back to the previous one.
            newerSingleton.shutdown();
            assertFalse(newerSingleton.isActive);
            assertTrue(newSingleton.isActive);
            assertFalse(singleton.isActive);
            verifyMetrics(Map.of("activation.count", 4.0,
                                 "activation.millis", 0.0,
                                 "deactivation.count", 3.0,
                                 "deactivation.millis", 0.0,
                                 "is_active", 1.0,
                                 "has_lease", 1.0),
                          metric);

            // A singleton which fails activation, and then also fails deactivation, is rejected;
            // the previously active singleton is reactivated afterwards.
            Phaser stunning = new Phaser(2);
            AtomicReference<String> thrownMessage = new AtomicReference<>();
            new Thread(() -> {
                RuntimeException e = assertThrows(RuntimeException.class,
                                                  () -> new Singleton(curator) {
                                                      @Override public void activate() {
                                                          throw new RuntimeException("expected test exception");
                                                      }
                                                      @Override public void deactivate() {
                                                          stunning.arriveAndAwaitAdvance();
                                                          stunning.arriveAndAwaitAdvance();
                                                          throw new RuntimeException("expected test exception");
                                                      }
                                                      @Override public String toString() {
                                                          return "failing singleton";
                                                      }
                                                  });
                thrownMessage.set(e.getMessage());
                stunning.arriveAndAwaitAdvance();
            }).start();

            stunning.arriveAndAwaitAdvance(); // failing singleton is stuck in deactivate()
            assertFalse(newSingleton.isActive);
            assertTrue(curator.isActive(newSingleton.id()));
            verifyMetrics(Map.of("activation.count", 5.0,
                                 "activation.millis", 0.0,
                                 "activation.failure.count", 1.0,
                                 "deactivation.count", 5.0,
                                 "deactivation.millis", 0.0,
                                 "is_active", 0.0,
                                 "has_lease", 1.0),
                          metric);

            stunning.arriveAndAwaitAdvance(); // let deactivate() complete (and fail)
            stunning.arriveAndAwaitAdvance(); // wait for the assertThrows to complete
            assertTrue(newSingleton.isActive);
            assertEquals("failed to register failing singleton", thrownMessage.get());
            verifyMetrics(Map.of("activation.count", 6.0,
                                 "activation.millis", 0.0,
                                 "activation.failure.count", 1.0,
                                 "deactivation.count", 5.0,
                                 "deactivation.millis", 0.0,
                                 "is_active", 1.0,
                                 "has_lease", 1.0),
                          metric);

            newSingleton.shutdown();
            curator.deconstruct();
            verifyMetrics(Map.of("activation.count", 6.0,
                                 "activation.millis", 0.0,
                                 "activation.failure.count", 1.0,
                                 "deactivation.count", 6.0,
                                 "deactivation.millis", 0.0,
                                 "is_active", 0.0,
                                 "has_lease", 0.0),
                          metric);
        }
    }

    @Test
    public void testSingletonsInDifferentContainers() {
        try (MockCurator wrapped = new MockCurator()) {
            MockMetric metric = new MockMetric();
            CuratorWrapper curator = new CuratorWrapper(wrapped, new ManualClock(), Duration.ofMillis(100), metric);

            // While the ZK lock is held elsewhere, the local singleton cannot become active.
            Singleton singleton;
            try (Lock lock = wrapped.lock(lockPath, Duration.ofSeconds(1))) {
                singleton = new Singleton(curator);
                assertFalse(singleton.isActive);
                assertFalse(curator.isActive(singleton.id()));
                assertEquals(Map.of(), metric.metrics());
                singleton.phaser.register(); // test thread takes part in the protocol from now on
            }
            singleton.phaser.arriveAndAwaitAdvance();
            assertTrue(curator.isActive(singleton.id()));
            assertTrue(singleton.isActive);
            verifyMetrics(Map.of("activation.count", 1.0,
                                 "has_lease", 1.0),
                          metric);

            // A lost ZK connection, with the lock grabbed by another container, deactivates the singleton.
            Phaser stunning = new Phaser(2);
            new Thread(() -> {
                try (Lock lock = wrapped.lock(lockPath, Duration.ofSeconds(2))) {
                    stunning.arriveAndAwaitAdvance();
                    stunning.arriveAndAwaitAdvance();
                }
            }).start();
            ((MockCuratorFramework) wrapped.framework()).connectionStateListeners.listeners
                    .forEach(listener -> listener.stateChanged(null, ConnectionState.LOST));

            singleton.phaser.arriveAndAwaitAdvance();
            stunning.arriveAndAwaitAdvance();
            assertFalse(singleton.isActive);
            verifyMetrics(Map.of("activation.count", 1.0,
                                 "activation.millis", 0.0,
                                 "deactivation.count", 1.0,
                                 "deactivation.millis", 0.0,
                                 "is_active", 0.0,
                                 "has_lease", 0.0),
                          metric);

            // When the other container releases the lock again, the singleton is reactivated.
            stunning.arriveAndAwaitAdvance();
            singleton.phaser.arriveAndAwaitAdvance();
            assertTrue(singleton.isActive);
            verifyMetrics(Map.of("activation.count", 2.0,
                                 "activation.millis", 0.0,
                                 "deactivation.count", 1.0,
                                 "deactivation.millis", 0.0,
                                 "has_lease", 1.0),
                          metric);

            singleton.phaser.arriveAndDeregister();
            singleton.shutdown();
            curator.deconstruct();
            assertFalse(singleton.isActive);
            verifyMetrics(Map.of("activation.count", 2.0,
                                 "activation.millis", 0.0,
                                 "deactivation.count", 2.0,
                                 "deactivation.millis", 0.0,
                                 "is_active", 0.0,
                                 "has_lease", 0.0),
                          metric);
        }
    }

    /** Minimal singleton which records activation state, and synchronizes with the test via a phaser. */
    static class Singleton extends AbstractSingletonWorker {
        Singleton(VespaCurator curator) { register(curator, Duration.ofSeconds(2)); }
        boolean isActive;
        Phaser phaser = new Phaser(1);
        @Override public String id() { return "singleton"; }
        @Override public void activate() {
            if (isActive) throw new IllegalStateException("already active");
            isActive = true;
            phaser.arriveAndAwaitAdvance();
        }
        @Override public void deactivate() {
            if ( ! isActive) throw new IllegalStateException("already inactive");
            isActive = false;
            phaser.arriveAndAwaitAdvance();
        }
        public void shutdown() { unregister(Duration.ofSeconds(2)); }
    }

    /** Asserts that each expected singleton metric has the given value for this test's singleton id. */
    static void verifyMetrics(Map<String, Double> expected, MockMetric metrics) {
        expected.forEach((metric, value) -> assertEquals(metric, value,
                                                         metrics.metrics().get("singleton." + metric)
                                                                .get(Map.of("singletonId", "singleton"))));
    }

}
Consider keeping the `SecureRandom` as a member (it might be costly to instantiate per request). You could also make unit tests deterministic by accepting a `RandomGenerator` instance as a constructor parameter and injecting a `Random` with a fixed seed in test cases.
/**
 * Creates a new SecretSharedKey holding a randomly generated 256-bit AES key,
 * sealed towards the given receiver public key with ECIES so that only the
 * holder of the corresponding private key can recover it.
 *
 * @param receiverPublicKey public key the AES key is sealed towards
 * @param keyId             identifier stored alongside the sealed payload
 * @return the secret key together with its sealed, shareable form
 */
public static SecretSharedKey generateForReceiverPublicKey(PublicKey receiverPublicKey, int keyId) {
    try {
        // Fresh CSPRNG per call; NOTE(review): could be hoisted to a shared static field.
        var aesKeyGenerator = KeyGenerator.getInstance("AES");
        aesKeyGenerator.init(256, new SecureRandom());
        var sharedSecret = aesKeyGenerator.generateKey();

        var eciesCipher = Cipher.getInstance(ECIES_CIPHER_NAME, BouncyCastleProviderHolder.getInstance());
        eciesCipher.init(Cipher.ENCRYPT_MODE, receiverPublicKey);
        byte[] sealedPayload = eciesCipher.doFinal(sharedSecret.getEncoded());

        return new SecretSharedKey(sharedSecret, new SealedSharedKey(keyId, sealedPayload));
    } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException
            | IllegalBlockSizeException | BadPaddingException e) {
        throw new RuntimeException(e); // crypto setup failure; treated as unrecoverable
    }
}
keyGen.init(256, new SecureRandom());
/**
 * Creates a new SecretSharedKey holding a randomly generated 256-bit AES key,
 * sealed towards the given receiver public key with ECIES so that only the
 * holder of the corresponding private key can recover it.
 *
 * @param receiverPublicKey public key the AES key is sealed towards
 * @param keyId             identifier stored alongside the sealed payload
 * @return the secret key together with its sealed, shareable form
 */
public static SecretSharedKey generateForReceiverPublicKey(PublicKey receiverPublicKey, int keyId) {
    try {
        var aesKeyGenerator = KeyGenerator.getInstance("AES");
        aesKeyGenerator.init(256, SHARED_CSPRNG); // shared, thread-safe CSPRNG; avoids per-call construction
        var sharedSecret = aesKeyGenerator.generateKey();

        var eciesCipher = Cipher.getInstance(ECIES_CIPHER_NAME, BouncyCastleProviderHolder.getInstance());
        eciesCipher.init(Cipher.ENCRYPT_MODE, receiverPublicKey);
        byte[] sealedPayload = eciesCipher.doFinal(sharedSecret.getEncoded());

        return new SecretSharedKey(sharedSecret, new SealedSharedKey(keyId, sealedPayload));
    } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException
            | IllegalBlockSizeException | BadPaddingException e) {
        throw new RuntimeException(e); // crypto setup failure; treated as unrecoverable
    }
}
/**
 * Helpers for reconstructing shared AES-256 keys from ECIES-sealed tokens,
 * and for creating single-use AES-GCM ciphers from such keys.
 */
class SharedKeyGenerator {

    private static final int AES_GCM_AUTH_TAG_BITS = 128;
    private static final String AES_GCM_ALGO_SPEC = "AES/GCM/NoPadding";
    private static final String ECIES_CIPHER_NAME = "ECIES"; // provided by Bouncy Castle

    /**
     * Decrypts the ECIES payload of the given sealed key with the receiver's private key,
     * reconstructing the AES secret key it contains.
     *
     * @throws IllegalArgumentException if the ECIES integrity check fails (token corrupt,
     *         or sealed for a different key pair)
     */
    public static SecretSharedKey fromSealedKey(SealedSharedKey sealedKey, KeyPair receiverKeyPair) {
        try {
            var cipher = Cipher.getInstance(ECIES_CIPHER_NAME, BouncyCastleProviderHolder.getInstance());
            cipher.init(Cipher.DECRYPT_MODE, receiverKeyPair.getPrivate());
            byte[] secretKey = cipher.doFinal(sealedKey.eciesPayload());
            return new SecretSharedKey(new SecretKeySpec(secretKey, "AES"), sealedKey);
        } catch (BadBlockException e) { // Bouncy Castle-specific: ECIES integrity validation failed
            throw new IllegalArgumentException("Token integrity check failed; token is either corrupt or was " +
                                               "generated for a different public key");
        } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException
                | IllegalBlockSizeException | BadPaddingException e) {
            throw new RuntimeException(e);
        }
    }

    // Deliberately fixed 96-bit IV: acceptable only because each secret key is meant to
    // encrypt exactly one plaintext (see makeAes256GcmEncryptionCipher); never reuse a key.
    private static byte[] fixed96BitIvForSingleUseKey() {
        return new byte[] { 'h', 'e', 'r', 'e', 'B', 'd', 'r', 'a', 'g', 'o', 'n', 's' };
    }

    /** Creates an AES-256-GCM cipher with a 128-bit auth tag and the fixed single-use IV, for the given mode. */
    private static Cipher makeAes256GcmCipher(SecretSharedKey secretSharedKey, int cipherMode) {
        try {
            var cipher = Cipher.getInstance(AES_GCM_ALGO_SPEC);
            var gcmSpec = new GCMParameterSpec(AES_GCM_AUTH_TAG_BITS, fixed96BitIvForSingleUseKey());
            cipher.init(cipherMode, secretSharedKey.secretKey(), gcmSpec);
            return cipher;
        } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException
                | InvalidAlgorithmParameterException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates an AES-GCM-256 Cipher that can be used to encrypt arbitrary plaintext.
     *
     * The given secret key MUST NOT be used to encrypt more than one plaintext.
     */
    public static Cipher makeAes256GcmEncryptionCipher(SecretSharedKey secretSharedKey) {
        return makeAes256GcmCipher(secretSharedKey, Cipher.ENCRYPT_MODE);
    }

    /**
     * Creates an AES-GCM-256 Cipher that can be used to decrypt ciphertext that was previously
     * encrypted with the given secret key.
     */
    public static Cipher makeAes256GcmDecryptionCipher(SecretSharedKey secretSharedKey) {
        return makeAes256GcmCipher(secretSharedKey, Cipher.DECRYPT_MODE);
    }

}
/**
 * Helpers for reconstructing shared AES-256 keys from ECIES-sealed tokens,
 * and for creating single-use AES-GCM ciphers from such keys.
 */
class SharedKeyGenerator {

    private static final int AES_GCM_AUTH_TAG_BITS = 128;
    private static final String AES_GCM_ALGO_SPEC = "AES/GCM/NoPadding";
    private static final String ECIES_CIPHER_NAME = "ECIES"; // provided by Bouncy Castle
    // Process-wide CSPRNG; SecureRandom is thread-safe, so sharing avoids per-call construction cost.
    private static final SecureRandom SHARED_CSPRNG = new SecureRandom();

    /**
     * Decrypts the ECIES payload of the given sealed key with the receiver's private key,
     * reconstructing the AES secret key it contains.
     *
     * @throws IllegalArgumentException if the ECIES integrity check fails (token corrupt,
     *         or sealed for a different key pair)
     */
    public static SecretSharedKey fromSealedKey(SealedSharedKey sealedKey, PrivateKey receiverPrivateKey) {
        try {
            var cipher = Cipher.getInstance(ECIES_CIPHER_NAME, BouncyCastleProviderHolder.getInstance());
            cipher.init(Cipher.DECRYPT_MODE, receiverPrivateKey);
            byte[] secretKey = cipher.doFinal(sealedKey.eciesPayload());
            return new SecretSharedKey(new SecretKeySpec(secretKey, "AES"), sealedKey);
        } catch (BadBlockException e) { // Bouncy Castle-specific: ECIES integrity validation failed
            throw new IllegalArgumentException("Token integrity check failed; token is either corrupt or was " +
                                               "generated for a different public key");
        } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException
                | IllegalBlockSizeException | BadPaddingException e) {
            throw new RuntimeException(e);
        }
    }

    // Deliberately fixed 96-bit IV: acceptable only because each secret key is meant to
    // encrypt exactly one plaintext (see makeAes256GcmEncryptionCipher); never reuse a key.
    private static byte[] fixed96BitIvForSingleUseKey() {
        return new byte[] { 'h', 'e', 'r', 'e', 'B', 'd', 'r', 'a', 'g', 'o', 'n', 's' };
    }

    /** Creates an AES-256-GCM cipher with a 128-bit auth tag and the fixed single-use IV, for the given mode. */
    private static Cipher makeAes256GcmCipher(SecretSharedKey secretSharedKey, int cipherMode) {
        try {
            var cipher = Cipher.getInstance(AES_GCM_ALGO_SPEC);
            var gcmSpec = new GCMParameterSpec(AES_GCM_AUTH_TAG_BITS, fixed96BitIvForSingleUseKey());
            cipher.init(cipherMode, secretSharedKey.secretKey(), gcmSpec);
            return cipher;
        } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException
                | InvalidAlgorithmParameterException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates an AES-GCM-256 Cipher that can be used to encrypt arbitrary plaintext.
     *
     * The given secret key MUST NOT be used to encrypt more than one plaintext.
     */
    public static Cipher makeAes256GcmEncryptionCipher(SecretSharedKey secretSharedKey) {
        return makeAes256GcmCipher(secretSharedKey, Cipher.ENCRYPT_MODE);
    }

    /**
     * Creates an AES-GCM-256 Cipher that can be used to decrypt ciphertext that was previously
     * encrypted with the given secret key.
     */
    public static Cipher makeAes256GcmDecryptionCipher(SecretSharedKey secretSharedKey) {
        return makeAes256GcmCipher(secretSharedKey, Cipher.DECRYPT_MODE);
    }

}
Alternatively, make it a static field if deterministic unit tests are not required — `SecureRandom` is thread-safe.
/**
 * Creates a new SecretSharedKey holding a randomly generated 256-bit AES key,
 * sealed towards the given receiver public key with ECIES so that only the
 * holder of the corresponding private key can recover it.
 *
 * @param receiverPublicKey public key the AES key is sealed towards
 * @param keyId             identifier stored alongside the sealed payload
 * @return the secret key together with its sealed, shareable form
 */
public static SecretSharedKey generateForReceiverPublicKey(PublicKey receiverPublicKey, int keyId) {
    try {
        // Fresh CSPRNG per call; NOTE(review): could be hoisted to a shared static field.
        var aesKeyGenerator = KeyGenerator.getInstance("AES");
        aesKeyGenerator.init(256, new SecureRandom());
        var sharedSecret = aesKeyGenerator.generateKey();

        var eciesCipher = Cipher.getInstance(ECIES_CIPHER_NAME, BouncyCastleProviderHolder.getInstance());
        eciesCipher.init(Cipher.ENCRYPT_MODE, receiverPublicKey);
        byte[] sealedPayload = eciesCipher.doFinal(sharedSecret.getEncoded());

        return new SecretSharedKey(sharedSecret, new SealedSharedKey(keyId, sealedPayload));
    } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException
            | IllegalBlockSizeException | BadPaddingException e) {
        throw new RuntimeException(e); // crypto setup failure; treated as unrecoverable
    }
}
keyGen.init(256, new SecureRandom());
/**
 * Creates a new SecretSharedKey holding a randomly generated 256-bit AES key,
 * sealed towards the given receiver public key with ECIES so that only the
 * holder of the corresponding private key can recover it.
 *
 * @param receiverPublicKey public key the AES key is sealed towards
 * @param keyId             identifier stored alongside the sealed payload
 * @return the secret key together with its sealed, shareable form
 */
public static SecretSharedKey generateForReceiverPublicKey(PublicKey receiverPublicKey, int keyId) {
    try {
        var aesKeyGenerator = KeyGenerator.getInstance("AES");
        aesKeyGenerator.init(256, SHARED_CSPRNG); // shared, thread-safe CSPRNG; avoids per-call construction
        var sharedSecret = aesKeyGenerator.generateKey();

        var eciesCipher = Cipher.getInstance(ECIES_CIPHER_NAME, BouncyCastleProviderHolder.getInstance());
        eciesCipher.init(Cipher.ENCRYPT_MODE, receiverPublicKey);
        byte[] sealedPayload = eciesCipher.doFinal(sharedSecret.getEncoded());

        return new SecretSharedKey(sharedSecret, new SealedSharedKey(keyId, sealedPayload));
    } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException
            | IllegalBlockSizeException | BadPaddingException e) {
        throw new RuntimeException(e); // crypto setup failure; treated as unrecoverable
    }
}
/**
 * Helpers for reconstructing shared AES-256 keys from ECIES-sealed tokens,
 * and for creating single-use AES-GCM ciphers from such keys.
 */
class SharedKeyGenerator {

    private static final int AES_GCM_AUTH_TAG_BITS = 128;
    private static final String AES_GCM_ALGO_SPEC = "AES/GCM/NoPadding";
    private static final String ECIES_CIPHER_NAME = "ECIES"; // provided by Bouncy Castle

    /**
     * Decrypts the ECIES payload of the given sealed key with the receiver's private key,
     * reconstructing the AES secret key it contains.
     *
     * @throws IllegalArgumentException if the ECIES integrity check fails (token corrupt,
     *         or sealed for a different key pair)
     */
    public static SecretSharedKey fromSealedKey(SealedSharedKey sealedKey, KeyPair receiverKeyPair) {
        try {
            var cipher = Cipher.getInstance(ECIES_CIPHER_NAME, BouncyCastleProviderHolder.getInstance());
            cipher.init(Cipher.DECRYPT_MODE, receiverKeyPair.getPrivate());
            byte[] secretKey = cipher.doFinal(sealedKey.eciesPayload());
            return new SecretSharedKey(new SecretKeySpec(secretKey, "AES"), sealedKey);
        } catch (BadBlockException e) { // Bouncy Castle-specific: ECIES integrity validation failed
            throw new IllegalArgumentException("Token integrity check failed; token is either corrupt or was " +
                                               "generated for a different public key");
        } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException
                | IllegalBlockSizeException | BadPaddingException e) {
            throw new RuntimeException(e);
        }
    }

    // Deliberately fixed 96-bit IV: acceptable only because each secret key is meant to
    // encrypt exactly one plaintext (see makeAes256GcmEncryptionCipher); never reuse a key.
    private static byte[] fixed96BitIvForSingleUseKey() {
        return new byte[] { 'h', 'e', 'r', 'e', 'B', 'd', 'r', 'a', 'g', 'o', 'n', 's' };
    }

    /** Creates an AES-256-GCM cipher with a 128-bit auth tag and the fixed single-use IV, for the given mode. */
    private static Cipher makeAes256GcmCipher(SecretSharedKey secretSharedKey, int cipherMode) {
        try {
            var cipher = Cipher.getInstance(AES_GCM_ALGO_SPEC);
            var gcmSpec = new GCMParameterSpec(AES_GCM_AUTH_TAG_BITS, fixed96BitIvForSingleUseKey());
            cipher.init(cipherMode, secretSharedKey.secretKey(), gcmSpec);
            return cipher;
        } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException
                | InvalidAlgorithmParameterException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates an AES-GCM-256 Cipher that can be used to encrypt arbitrary plaintext.
     *
     * The given secret key MUST NOT be used to encrypt more than one plaintext.
     */
    public static Cipher makeAes256GcmEncryptionCipher(SecretSharedKey secretSharedKey) {
        return makeAes256GcmCipher(secretSharedKey, Cipher.ENCRYPT_MODE);
    }

    /**
     * Creates an AES-GCM-256 Cipher that can be used to decrypt ciphertext that was previously
     * encrypted with the given secret key.
     */
    public static Cipher makeAes256GcmDecryptionCipher(SecretSharedKey secretSharedKey) {
        return makeAes256GcmCipher(secretSharedKey, Cipher.DECRYPT_MODE);
    }

}
/**
 * Helpers for reconstructing shared AES-256 keys from ECIES-sealed tokens,
 * and for creating single-use AES-GCM ciphers from such keys.
 */
class SharedKeyGenerator {

    private static final int AES_GCM_AUTH_TAG_BITS = 128;
    private static final String AES_GCM_ALGO_SPEC = "AES/GCM/NoPadding";
    private static final String ECIES_CIPHER_NAME = "ECIES"; // provided by Bouncy Castle
    // Process-wide CSPRNG; SecureRandom is thread-safe, so sharing avoids per-call construction cost.
    private static final SecureRandom SHARED_CSPRNG = new SecureRandom();

    /**
     * Decrypts the ECIES payload of the given sealed key with the receiver's private key,
     * reconstructing the AES secret key it contains.
     *
     * @throws IllegalArgumentException if the ECIES integrity check fails (token corrupt,
     *         or sealed for a different key pair)
     */
    public static SecretSharedKey fromSealedKey(SealedSharedKey sealedKey, PrivateKey receiverPrivateKey) {
        try {
            var cipher = Cipher.getInstance(ECIES_CIPHER_NAME, BouncyCastleProviderHolder.getInstance());
            cipher.init(Cipher.DECRYPT_MODE, receiverPrivateKey);
            byte[] secretKey = cipher.doFinal(sealedKey.eciesPayload());
            return new SecretSharedKey(new SecretKeySpec(secretKey, "AES"), sealedKey);
        } catch (BadBlockException e) { // Bouncy Castle-specific: ECIES integrity validation failed
            throw new IllegalArgumentException("Token integrity check failed; token is either corrupt or was " +
                                               "generated for a different public key");
        } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException
                | IllegalBlockSizeException | BadPaddingException e) {
            throw new RuntimeException(e);
        }
    }

    // Deliberately fixed 96-bit IV: acceptable only because each secret key is meant to
    // encrypt exactly one plaintext (see makeAes256GcmEncryptionCipher); never reuse a key.
    private static byte[] fixed96BitIvForSingleUseKey() {
        return new byte[] { 'h', 'e', 'r', 'e', 'B', 'd', 'r', 'a', 'g', 'o', 'n', 's' };
    }

    /** Creates an AES-256-GCM cipher with a 128-bit auth tag and the fixed single-use IV, for the given mode. */
    private static Cipher makeAes256GcmCipher(SecretSharedKey secretSharedKey, int cipherMode) {
        try {
            var cipher = Cipher.getInstance(AES_GCM_ALGO_SPEC);
            var gcmSpec = new GCMParameterSpec(AES_GCM_AUTH_TAG_BITS, fixed96BitIvForSingleUseKey());
            cipher.init(cipherMode, secretSharedKey.secretKey(), gcmSpec);
            return cipher;
        } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException
                | InvalidAlgorithmParameterException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates an AES-GCM-256 Cipher that can be used to encrypt arbitrary plaintext.
     *
     * The given secret key MUST NOT be used to encrypt more than one plaintext.
     */
    public static Cipher makeAes256GcmEncryptionCipher(SecretSharedKey secretSharedKey) {
        return makeAes256GcmCipher(secretSharedKey, Cipher.ENCRYPT_MODE);
    }

    /**
     * Creates an AES-GCM-256 Cipher that can be used to decrypt ciphertext that was previously
     * encrypted with the given secret key.
     */
    public static Cipher makeAes256GcmDecryptionCipher(SecretSharedKey secretSharedKey) {
        return makeAes256GcmCipher(secretSharedKey, Cipher.DECRYPT_MODE);
    }

}
Fixed; moved it to a static field, since we don't explicitly test the generated key material bytes anyway.
/**
 * Creates a new SecretSharedKey holding a randomly generated 256-bit AES key,
 * sealed towards the given receiver public key with ECIES so that only the
 * holder of the corresponding private key can recover it.
 *
 * @param receiverPublicKey public key the AES key is sealed towards
 * @param keyId             identifier stored alongside the sealed payload
 * @return the secret key together with its sealed, shareable form
 */
public static SecretSharedKey generateForReceiverPublicKey(PublicKey receiverPublicKey, int keyId) {
    try {
        // Fresh CSPRNG per call; NOTE(review): could be hoisted to a shared static field.
        var aesKeyGenerator = KeyGenerator.getInstance("AES");
        aesKeyGenerator.init(256, new SecureRandom());
        var sharedSecret = aesKeyGenerator.generateKey();

        var eciesCipher = Cipher.getInstance(ECIES_CIPHER_NAME, BouncyCastleProviderHolder.getInstance());
        eciesCipher.init(Cipher.ENCRYPT_MODE, receiverPublicKey);
        byte[] sealedPayload = eciesCipher.doFinal(sharedSecret.getEncoded());

        return new SecretSharedKey(sharedSecret, new SealedSharedKey(keyId, sealedPayload));
    } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException
            | IllegalBlockSizeException | BadPaddingException e) {
        throw new RuntimeException(e); // crypto setup failure; treated as unrecoverable
    }
}
keyGen.init(256, new SecureRandom());
/**
 * Creates a new SecretSharedKey holding a randomly generated 256-bit AES key,
 * sealed towards the given receiver public key with ECIES so that only the
 * holder of the corresponding private key can recover it.
 *
 * @param receiverPublicKey public key the AES key is sealed towards
 * @param keyId             identifier stored alongside the sealed payload
 * @return the secret key together with its sealed, shareable form
 */
public static SecretSharedKey generateForReceiverPublicKey(PublicKey receiverPublicKey, int keyId) {
    try {
        var aesKeyGenerator = KeyGenerator.getInstance("AES");
        aesKeyGenerator.init(256, SHARED_CSPRNG); // shared, thread-safe CSPRNG; avoids per-call construction
        var sharedSecret = aesKeyGenerator.generateKey();

        var eciesCipher = Cipher.getInstance(ECIES_CIPHER_NAME, BouncyCastleProviderHolder.getInstance());
        eciesCipher.init(Cipher.ENCRYPT_MODE, receiverPublicKey);
        byte[] sealedPayload = eciesCipher.doFinal(sharedSecret.getEncoded());

        return new SecretSharedKey(sharedSecret, new SealedSharedKey(keyId, sealedPayload));
    } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException
            | IllegalBlockSizeException | BadPaddingException e) {
        throw new RuntimeException(e); // crypto setup failure; treated as unrecoverable
    }
}
/**
 * Helpers for reconstructing shared AES-256 keys from ECIES-sealed tokens,
 * and for creating single-use AES-GCM ciphers from such keys.
 */
class SharedKeyGenerator {

    private static final int AES_GCM_AUTH_TAG_BITS = 128;
    private static final String AES_GCM_ALGO_SPEC = "AES/GCM/NoPadding";
    private static final String ECIES_CIPHER_NAME = "ECIES"; // provided by Bouncy Castle

    /**
     * Decrypts the ECIES payload of the given sealed key with the receiver's private key,
     * reconstructing the AES secret key it contains.
     *
     * @throws IllegalArgumentException if the ECIES integrity check fails (token corrupt,
     *         or sealed for a different key pair)
     */
    public static SecretSharedKey fromSealedKey(SealedSharedKey sealedKey, KeyPair receiverKeyPair) {
        try {
            var cipher = Cipher.getInstance(ECIES_CIPHER_NAME, BouncyCastleProviderHolder.getInstance());
            cipher.init(Cipher.DECRYPT_MODE, receiverKeyPair.getPrivate());
            byte[] secretKey = cipher.doFinal(sealedKey.eciesPayload());
            return new SecretSharedKey(new SecretKeySpec(secretKey, "AES"), sealedKey);
        } catch (BadBlockException e) { // Bouncy Castle-specific: ECIES integrity validation failed
            throw new IllegalArgumentException("Token integrity check failed; token is either corrupt or was " +
                                               "generated for a different public key");
        } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException
                | IllegalBlockSizeException | BadPaddingException e) {
            throw new RuntimeException(e);
        }
    }

    // Deliberately fixed 96-bit IV: acceptable only because each secret key is meant to
    // encrypt exactly one plaintext (see makeAes256GcmEncryptionCipher); never reuse a key.
    private static byte[] fixed96BitIvForSingleUseKey() {
        return new byte[] { 'h', 'e', 'r', 'e', 'B', 'd', 'r', 'a', 'g', 'o', 'n', 's' };
    }

    /** Creates an AES-256-GCM cipher with a 128-bit auth tag and the fixed single-use IV, for the given mode. */
    private static Cipher makeAes256GcmCipher(SecretSharedKey secretSharedKey, int cipherMode) {
        try {
            var cipher = Cipher.getInstance(AES_GCM_ALGO_SPEC);
            var gcmSpec = new GCMParameterSpec(AES_GCM_AUTH_TAG_BITS, fixed96BitIvForSingleUseKey());
            cipher.init(cipherMode, secretSharedKey.secretKey(), gcmSpec);
            return cipher;
        } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException
                | InvalidAlgorithmParameterException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates an AES-GCM-256 Cipher that can be used to encrypt arbitrary plaintext.
     *
     * The given secret key MUST NOT be used to encrypt more than one plaintext.
     */
    public static Cipher makeAes256GcmEncryptionCipher(SecretSharedKey secretSharedKey) {
        return makeAes256GcmCipher(secretSharedKey, Cipher.ENCRYPT_MODE);
    }

    /**
     * Creates an AES-GCM-256 Cipher that can be used to decrypt ciphertext that was previously
     * encrypted with the given secret key.
     */
    public static Cipher makeAes256GcmDecryptionCipher(SecretSharedKey secretSharedKey) {
        return makeAes256GcmCipher(secretSharedKey, Cipher.DECRYPT_MODE);
    }

}
/**
 * Helpers for reconstructing shared AES-256 keys from ECIES-sealed tokens,
 * and for creating single-use AES-GCM ciphers from such keys.
 */
class SharedKeyGenerator {

    private static final int AES_GCM_AUTH_TAG_BITS = 128;
    private static final String AES_GCM_ALGO_SPEC = "AES/GCM/NoPadding";
    private static final String ECIES_CIPHER_NAME = "ECIES"; // provided by Bouncy Castle
    // Process-wide CSPRNG; SecureRandom is thread-safe, so sharing avoids per-call construction cost.
    private static final SecureRandom SHARED_CSPRNG = new SecureRandom();

    /**
     * Decrypts the ECIES payload of the given sealed key with the receiver's private key,
     * reconstructing the AES secret key it contains.
     *
     * @throws IllegalArgumentException if the ECIES integrity check fails (token corrupt,
     *         or sealed for a different key pair)
     */
    public static SecretSharedKey fromSealedKey(SealedSharedKey sealedKey, PrivateKey receiverPrivateKey) {
        try {
            var cipher = Cipher.getInstance(ECIES_CIPHER_NAME, BouncyCastleProviderHolder.getInstance());
            cipher.init(Cipher.DECRYPT_MODE, receiverPrivateKey);
            byte[] secretKey = cipher.doFinal(sealedKey.eciesPayload());
            return new SecretSharedKey(new SecretKeySpec(secretKey, "AES"), sealedKey);
        } catch (BadBlockException e) { // Bouncy Castle-specific: ECIES integrity validation failed
            throw new IllegalArgumentException("Token integrity check failed; token is either corrupt or was " +
                                               "generated for a different public key");
        } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException
                | IllegalBlockSizeException | BadPaddingException e) {
            throw new RuntimeException(e);
        }
    }

    // Deliberately fixed 96-bit IV: acceptable only because each secret key is meant to
    // encrypt exactly one plaintext (see makeAes256GcmEncryptionCipher); never reuse a key.
    private static byte[] fixed96BitIvForSingleUseKey() {
        return new byte[] { 'h', 'e', 'r', 'e', 'B', 'd', 'r', 'a', 'g', 'o', 'n', 's' };
    }

    /** Creates an AES-256-GCM cipher with a 128-bit auth tag and the fixed single-use IV, for the given mode. */
    private static Cipher makeAes256GcmCipher(SecretSharedKey secretSharedKey, int cipherMode) {
        try {
            var cipher = Cipher.getInstance(AES_GCM_ALGO_SPEC);
            var gcmSpec = new GCMParameterSpec(AES_GCM_AUTH_TAG_BITS, fixed96BitIvForSingleUseKey());
            cipher.init(cipherMode, secretSharedKey.secretKey(), gcmSpec);
            return cipher;
        } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException
                | InvalidAlgorithmParameterException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates an AES-GCM-256 Cipher that can be used to encrypt arbitrary plaintext.
     *
     * The given secret key MUST NOT be used to encrypt more than one plaintext.
     */
    public static Cipher makeAes256GcmEncryptionCipher(SecretSharedKey secretSharedKey) {
        return makeAes256GcmCipher(secretSharedKey, Cipher.ENCRYPT_MODE);
    }

    /**
     * Creates an AES-GCM-256 Cipher that can be used to decrypt ciphertext that was previously
     * encrypted with the given secret key.
     */
    public static Cipher makeAes256GcmDecryptionCipher(SecretSharedKey secretSharedKey) {
        return makeAes256GcmCipher(secretSharedKey, Cipher.DECRYPT_MODE);
    }

}
Hmm — this currently preprocesses only with the default instance, prod environment, default region and empty tags; it should probably preprocess with every configuration combination that may eventually be deployed.
/**
 * Runs the XML preprocessor over services.xml — with default instance, prod environment,
 * default region and empty tags — so that any files it references are read (and, as the
 * name suggests, cached), and so that obvious preprocessing errors surface immediately.
 *
 * @throws IllegalArgumentException if preprocessing fails
 */
private void preProcessAndPopulateCache() {
    FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile));
    if ( ! servicesXml.exists()) return; // no services.xml: nothing to preprocess

    try {
        var servicesReader = new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8);
        new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")),
                            servicesReader,
                            InstanceName.defaultName(),
                            Environment.prod,
                            RegionName.defaultName(),
                            Tags.empty())
                .run();
    }
    catch (IllegalArgumentException e) {
        throw e; // already the expected type; rethrow unwrapped
    }
    catch (Exception e) {
        throw new IllegalArgumentException(e);
    }
}
Tags.empty())
/**
 * Runs the XML preprocessor over services.xml — with default instance, prod environment,
 * default region and empty tags — so that any files it references are read (and, as the
 * name suggests, cached), and so that obvious preprocessing errors surface immediately.
 *
 * @throws IllegalArgumentException if preprocessing fails
 */
private void preProcessAndPopulateCache() {
    FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile));
    if ( ! servicesXml.exists()) return; // no services.xml: nothing to preprocess

    try {
        var servicesReader = new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8);
        new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")),
                            servicesReader,
                            InstanceName.defaultName(),
                            Environment.prod,
                            RegionName.defaultName(),
                            Tags.empty())
                .run();
    }
    catch (IllegalArgumentException e) {
        throw e; // already the expected type; rethrow unwrapped
    }
    catch (Exception e) {
        throw new IllegalArgumentException(e);
    }
}
/**
 * An application package: a zip archive whose content is parsed eagerly at construction
 * (deployment spec, validation overrides, build meta, trusted certificates), and hashed
 * both in full (contentHash) and per deployable content (bundleHash).
 *
 * Instances are immutable; the backing byte array is owned by this class and must not be
 * modified by callers.
 */
class ApplicationPackage {

    // Well-known file names inside the zip archive.
    private static final String trustedCertificatesFile = "security/clients.pem";
    private static final String buildMetaFile = "build-meta.json";
    static final String deploymentFile = "deployment.xml";
    private static final String validationOverridesFile = "validation-overrides.xml";
    static final String servicesFile = "services.xml";

    private final String contentHash;          // SHA-1 of the whole zip
    private final String bundleHash;           // hash of the deployable content only
    private final byte[] zippedContent;
    private final DeploymentSpec deploymentSpec;
    private final ValidationOverrides validationOverrides;
    private final ZipArchiveCache files;
    private final Optional<Version> compileVersion;
    private final Optional<Instant> buildTime;
    private final Optional<Version> parentVersion;
    private final List<X509Certificate> trustedCertificates;

    /**
     * Creates an application package from its zipped content.
     * This <b>assigns ownership</b> of the given byte array to this class;
     * it must not be further changed by the caller.
     */
    public ApplicationPackage(byte[] zippedContent) {
        this(zippedContent, false);
    }

    /**
     * Creates an application package from its zipped content.
     * This <b>assigns ownership</b> of the given byte array to this class;
     * it must not be further changed by the caller.
     * If 'requireFiles' is true, files needed by deployment orchestration must be present.
     */
    @SuppressWarnings("deprecation") // Hashing.sha1 is deprecated, but kept for hash stability
    public ApplicationPackage(byte[] zippedContent, boolean requireFiles) {
        this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null");
        this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString();
        // Pre-populate the cache with the well-known files, which are all read below.
        this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile,
                                                               validationOverridesFile,
                                                               servicesFile,
                                                               buildMetaFile,
                                                               trustedCertificatesFile));

        Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml);
        if (requireFiles && deploymentSpec.isEmpty())
            throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'");
        this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty);

        this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty);

        // build-meta.json fields are all optional; each is parsed independently.
        Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get);
        this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString())));
        this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong())));
        this.parentVersion = buildMetaObject.flatMap(object -> parse(object, "parentVersion", field -> Version.fromString(field.asString())));

        this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of());

        this.bundleHash = calculateBundleHash(zippedContent);

        preProcessAndPopulateCache();
    }

    /** Returns a copy of this with the given certificate appended. */
    public ApplicationPackage withTrustedCertificate(X509Certificate certificate) {
        List<X509Certificate> trustedCertificates = new ArrayList<>(this.trustedCertificates);
        trustedCertificates.add(certificate);
        byte[] certificatesBytes = X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8);

        // Rewrite the zip, replacing the trusted-certificates entry with the extended PEM list.
        ByteArrayOutputStream modified = new ByteArrayOutputStream(zippedContent.length + certificatesBytes.length);
        ZipEntries.transferAndWrite(modified, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, certificatesBytes);
        return new ApplicationPackage(modified.toByteArray());
    }

    /** Returns a hash of the content of this package */
    public String hash() { return contentHash; }

    /** Hash of all files and settings that influence what is deployed to config servers. */
    public String bundleHash() { return bundleHash; }

    /** Returns the content of this package. The content <b>must not</b> be modified. */
    public byte[] zippedContent() { return zippedContent; }

    /**
     * Returns the deployment spec from the deployment.xml file of the package content.<br>
     * This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file.<br>
     * <em>NB: <strong>Always</strong> read deployment spec from the {@link Application}, for deployment orchestration.</em>
     */
    public DeploymentSpec deploymentSpec() { return deploymentSpec; }

    /**
     * Returns the validation overrides from the validation-overrides.xml file of the package content.
     * This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file.
     */
    public ValidationOverrides validationOverrides() { return validationOverrides; }

    /** Returns the platform version which package was compiled against, if known. */
    public Optional<Version> compileVersion() { return compileVersion; }

    /** Returns the time this package was built, if known. */
    public Optional<Instant> buildTime() { return buildTime; }

    /** Returns the parent version used to compile the package, if known. */
    public Optional<Version> parentVersion() { return parentVersion; }

    /** Returns the list of certificates trusted by this application, or an empty list if no trust configured. */
    public List<X509Certificate> trustedCertificates() { return trustedCertificates; }

    /** Parses one optional build-meta field, turning any parse failure into an IllegalArgumentException naming the field. */
    private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) {
        Inspector field = buildMetaObject.field(fieldName);
        if ( ! field.valid() || field.type() == NIX)
            return Optional.empty();
        try {
            return Optional.of(mapper.apply(buildMetaObject.field(fieldName)));
        }
        catch (RuntimeException e) {
            throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e));
        }
    }

    /** Creates a valid application package that will remove all application's deployments */
    public static ApplicationPackage deploymentRemoval() {
        return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8),
                                                      deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8))));
    }

    /** Returns a zip containing the cached (meta data) files of this package. */
    public byte[] metaDataZip() { return cacheZip(); }

    // Zips all cache entries whose content is present.
    private byte[] cacheZip() {
        return filesZip(files.cache.entrySet().stream()
                             .filter(entry -> entry.getValue().isPresent())
                             .collect(toMap(entry -> entry.getKey().toString(),
                                            entry -> entry.getValue().get())));
    }

    /** Builds a zip archive from the given file-name-to-content map. */
    public static byte[] filesZip(Map<String, byte[]> files) {
        try (ZipBuilder zipBuilder = new ZipBuilder(files.values().stream().mapToInt(bytes -> bytes.length).sum() + 512)) {
            files.forEach(zipBuilder::add);
            zipBuilder.close();
            return zipBuilder.toByteArray();
        }
    }

    // Allows every known validation id, with an expiry 25 days from now.
    private static ValidationOverrides allValidationOverrides() {
        String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC));
        StringBuilder validationOverridesContents = new StringBuilder(1000);
        validationOverridesContents.append("<validation-overrides version=\"1.0\">\n");
        for (ValidationId validationId: ValidationId.values())
            validationOverridesContents.append("\t<allow until=\"").append(until).append("\">").append(validationId.value()).append("</allow>\n");
        validationOverridesContents.append("</validation-overrides>\n");
        return ValidationOverrides.fromXml(validationOverridesContents.toString());
    }

    // Hashes the CRCs of all zip entries except deployment.xml and build-meta.json, plus the
    // deployable part of the deployment spec — so orchestration-only changes keep the same hash.
    @SuppressWarnings("deprecation") // Hashing.sha1 is deprecated, but kept for hash stability
    private String calculateBundleHash(byte[] zippedContent) {
        Predicate<String> entryMatcher = name ->    ! name.endsWith(deploymentFile)
                                                 && ! name.endsWith(buildMetaFile);
        SortedMap<String, Long> crcByEntry = new TreeMap<>(); // sorted, for a deterministic hash
        Options options = Options.standard().pathPredicate(entryMatcher);
        ArchiveFile file;
        try (ArchiveStreamReader reader = ArchiveStreamReader.ofZip(new ByteArrayInputStream(zippedContent), options)) {
            OutputStream discard = OutputStream.nullOutputStream(); // only the CRCs are needed
            while ((file = reader.readNextTo(discard)) != null) {
                crcByEntry.put(file.path().toString(), file.crc32().orElse(-1));
            }
        }
        Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.forEach((key, value) -> {
            into.putBytes(key.getBytes());
            into.putLong(value);
        });
        return Hashing.sha1().newHasher()
                      .putObject(crcByEntry, funnel)
                      .putInt(deploymentSpec.deployableHashCode())
                      .hash().toString();
    }

    /** SHA-1 hex digest of the given bytes. */
    @SuppressWarnings("deprecation") // Hashing.sha1 is deprecated, but kept for hash stability
    public static String calculateHash(byte[] bytes) {
        return Hashing.sha1().newHasher()
                      .putBytes(bytes)
                      .hash().toString();
    }

    /** Maps normalized paths to cached content read from a zip archive. */
    private static class ZipArchiveCache {

        /** Max size of each extracted file */
        private static final int maxSize = 10 << 20; // 10 MiB

        private final byte[] zip;
        private final Map<Path, Optional<byte[]>> cache; // Optional.empty() caches "not present" too

        public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) {
            this.zip = zip;
            this.cache = new ConcurrentSkipListMap<>();
            this.cache.putAll(read(prePopulated));
        }

        public Optional<byte[]> get(String path) {
            return get(Paths.get(path));
        }

        public Optional<byte[]> get(Path path) {
            // Reads from the archive at most once per path; absence is also cached.
            return cache.computeIfAbsent(path.normalize(), read(List.of(path.normalize().toString()))::get);
        }

        /** Exposes the cached archive content as a (read-only) file system view. */
        public FileSystemWrapper wrapper() {
            return FileSystemWrapper.ofFiles(Path.of("./"),
                                             path -> get(path).isPresent(),
                                             path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString())));
        }

        // Extracts the named entries from the archive; names not found map to Optional.empty().
        private Map<Path, Optional<byte[]>> read(Collection<String> names) {
            var entries = ZipEntries.from(zip,
                                          name -> names.contains(name),
                                          maxSize,
                                          true)
                                    .asList().stream()
                                    .collect(toMap(entry -> Paths.get(entry.name()).normalize(),
                                                   ZipEntries.ZipEntryWithContent::content));
            names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty()));
            return entries;
        }

    }

}
class ApplicationPackage { private static final String trustedCertificatesFile = "security/clients.pem"; private static final String buildMetaFile = "build-meta.json"; static final String deploymentFile = "deployment.xml"; private static final String validationOverridesFile = "validation-overrides.xml"; static final String servicesFile = "services.xml"; private final String contentHash; private final String bundleHash; private final byte[] zippedContent; private final DeploymentSpec deploymentSpec; private final ValidationOverrides validationOverrides; private final ZipArchiveCache files; private final Optional<Version> compileVersion; private final Optional<Instant> buildTime; private final Optional<Version> parentVersion; private final List<X509Certificate> trustedCertificates; /** * Creates an application package from its zipped content. * This <b>assigns ownership</b> of the given byte array to this class; * it must not be further changed by the caller. */ public ApplicationPackage(byte[] zippedContent) { this(zippedContent, false); } /** * Creates an application package from its zipped content. * This <b>assigns ownership</b> of the given byte array to this class; * it must not be further changed by the caller. * If 'requireFiles' is true, files needed by deployment orchestration must be present. 
*/ @SuppressWarnings("deprecation") public ApplicationPackage(byte[] zippedContent, boolean requireFiles) { this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null"); this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString(); this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile)); Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml); if (requireFiles && deploymentSpec.isEmpty()) throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'"); this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty); this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty); Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get); this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString()))); this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong()))); this.parentVersion = buildMetaObject.flatMap(object -> parse(object, "parentVersion", field -> Version.fromString(field.asString()))); this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of()); this.bundleHash = calculateBundleHash(zippedContent); preProcessAndPopulateCache(); } /** Returns a copy of this with the given certificate appended. 
*/ public ApplicationPackage withTrustedCertificate(X509Certificate certificate) { List<X509Certificate> trustedCertificates = new ArrayList<>(this.trustedCertificates); trustedCertificates.add(certificate); byte[] certificatesBytes = X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8); ByteArrayOutputStream modified = new ByteArrayOutputStream(zippedContent.length + certificatesBytes.length); ZipEntries.transferAndWrite(modified, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, certificatesBytes); return new ApplicationPackage(modified.toByteArray()); } /** Returns a hash of the content of this package */ public String hash() { return contentHash; } /** Hash of all files and settings that influence what is deployed to config servers. */ public String bundleHash() { return bundleHash; } /** Returns the content of this package. The content <b>must not</b> be modified. */ public byte[] zippedContent() { return zippedContent; } /** * Returns the deployment spec from the deployment.xml file of the package content.<br> * This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file.<br> * <em>NB: <strong>Always</strong> read deployment spec from the {@link Application}, for deployment orchestration.</em> */ public DeploymentSpec deploymentSpec() { return deploymentSpec; } /** * Returns the validation overrides from the validation-overrides.xml file of the package content. * This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file. */ public ValidationOverrides validationOverrides() { return validationOverrides; } /** Returns the platform version which package was compiled against, if known. */ public Optional<Version> compileVersion() { return compileVersion; } /** Returns the time this package was built, if known. */ public Optional<Instant> buildTime() { return buildTime; } /** Returns the parent version used to compile the package, if known. 
*/ public Optional<Version> parentVersion() { return parentVersion; } /** Returns the list of certificates trusted by this application, or an empty list if no trust configured. */ public List<X509Certificate> trustedCertificates() { return trustedCertificates; } private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) { Inspector field = buildMetaObject.field(fieldName); if ( ! field.valid() || field.type() == NIX) return Optional.empty(); try { return Optional.of(mapper.apply(buildMetaObject.field(fieldName))); } catch (RuntimeException e) { throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e)); } } /** Creates a valid application package that will remove all application's deployments */ public static ApplicationPackage deploymentRemoval() { return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8), deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8)))); } /** Returns a zip containing meta data about deployments of this package by the given job. 
*/ public byte[] metaDataZip() { return cacheZip(); } private byte[] cacheZip() { return filesZip(files.cache.entrySet().stream() .filter(entry -> entry.getValue().isPresent()) .collect(toMap(entry -> entry.getKey().toString(), entry -> entry.getValue().get()))); } public static byte[] filesZip(Map<String, byte[]> files) { try (ZipBuilder zipBuilder = new ZipBuilder(files.values().stream().mapToInt(bytes -> bytes.length).sum() + 512)) { files.forEach(zipBuilder::add); zipBuilder.close(); return zipBuilder.toByteArray(); } } private static ValidationOverrides allValidationOverrides() { String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC)); StringBuilder validationOverridesContents = new StringBuilder(1000); validationOverridesContents.append("<validation-overrides version=\"1.0\">\n"); for (ValidationId validationId: ValidationId.values()) validationOverridesContents.append("\t<allow until=\"").append(until).append("\">").append(validationId.value()).append("</allow>\n"); validationOverridesContents.append("</validation-overrides>\n"); return ValidationOverrides.fromXml(validationOverridesContents.toString()); } @SuppressWarnings("deprecation") private String calculateBundleHash(byte[] zippedContent) { Predicate<String> entryMatcher = name -> ! name.endsWith(deploymentFile) && ! 
name.endsWith(buildMetaFile); SortedMap<String, Long> crcByEntry = new TreeMap<>(); Options options = Options.standard().pathPredicate(entryMatcher); ArchiveFile file; try (ArchiveStreamReader reader = ArchiveStreamReader.ofZip(new ByteArrayInputStream(zippedContent), options)) { OutputStream discard = OutputStream.nullOutputStream(); while ((file = reader.readNextTo(discard)) != null) { crcByEntry.put(file.path().toString(), file.crc32().orElse(-1)); } } Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.forEach((key, value) -> { into.putBytes(key.getBytes()); into.putLong(value); }); return Hashing.sha1().newHasher() .putObject(crcByEntry, funnel) .putInt(deploymentSpec.deployableHashCode()) .hash().toString(); } @SuppressWarnings("deprecation") public static String calculateHash(byte[] bytes) { return Hashing.sha1().newHasher() .putBytes(bytes) .hash().toString(); } /** Maps normalized paths to cached content read from a zip archive. */ private static class ZipArchiveCache { /** Max size of each extracted file */ private static final int maxSize = 10 << 20; private final byte[] zip; private final Map<Path, Optional<byte[]>> cache; public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) { this.zip = zip; this.cache = new ConcurrentSkipListMap<>(); this.cache.putAll(read(prePopulated)); } public Optional<byte[]> get(String path) { return get(Paths.get(path)); } public Optional<byte[]> get(Path path) { return cache.computeIfAbsent(path.normalize(), read(List.of(path.normalize().toString()))::get); } public FileSystemWrapper wrapper() { return FileSystemWrapper.ofFiles(Path.of("./"), path -> get(path).isPresent(), path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString()))); } private Map<Path, Optional<byte[]>> read(Collection<String> names) { var entries = ZipEntries.from(zip, name -> names.contains(name), maxSize, true) .asList().stream() .collect(toMap(entry -> Paths.get(entry.name()).normalize(), 
ZipEntries.ZipEntryWithContent::content)); names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty())); return entries; } } }