comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Doc was in agreement with this, but not any more.
public ApplicationList allowMajorVersion(int targetMajorVersion, int defaultMajorVersion) { return listOf(list.stream().filter(a -> a.majorVersion().orElse(a.deploymentSpec().majorVersion().orElse(defaultMajorVersion)) >= targetMajorVersion)); }
>= targetMajorVersion));
public ApplicationList allowMajorVersion(int targetMajorVersion, int defaultMajorVersion) { return listOf(list.stream().filter(a -> a.majorVersion().orElse(a.deploymentSpec().majorVersion().orElse(defaultMajorVersion)) >= targetMajorVersion)); }
class ApplicationList { private final ImmutableList<Application> list; private ApplicationList(Iterable<Application> applications) { this.list = ImmutableList.copyOf(applications); } public static ApplicationList from(Iterable<Application> applications) { return new ApplicationList(applications); } public static ApplicationList from(Collection<ApplicationId> ids, ApplicationController applications) { return listOf(ids.stream().map(applications::require)); } /** Returns the applications in this as an immutable list */ public List<Application> asList() { return list; } /** Returns the ids of the applications in this as an immutable list */ public List<ApplicationId> idList() { return ImmutableList.copyOf(list.stream().map(Application::id)::iterator); } public boolean isEmpty() { return list.isEmpty(); } public int size() { return list.size(); } /** Returns the subset of applications which are upgrading (to any version), not considering block windows. */ public ApplicationList upgrading() { return listOf(list.stream().filter(application -> application.change().platform().isPresent())); } /** Returns the subset of applications which are currently upgrading to the given version */ public ApplicationList upgradingTo(Version version) { return listOf(list.stream().filter(application -> isUpgradingTo(version, application))); } /** Returns the subset of applications which are not pinned to a certain Vespa version. */ public ApplicationList notPinned() { return listOf(list.stream().filter(application -> ! application.change().isPinned())); } /** Returns the subset of applications which are currently not upgrading to the given version */ public ApplicationList notUpgradingTo(Version version) { return listOf(list.stream().filter(application -> ! 
isUpgradingTo(version, application))); } /** * Returns the subset of applications which are currently not upgrading to the given version, * or returns all if no version is specified */ public ApplicationList notUpgradingTo(Optional<Version> version) { if ( ! version.isPresent()) return this; return notUpgradingTo(version.get()); } /** Returns the subset of applications which have changes left to deploy; blocked, or deploying */ public ApplicationList withChanges() { return listOf(list.stream().filter(application -> application.change().isPresent() || application.outstandingChange().isPresent())); } /** Returns the subset of applications which are currently not deploying a change */ public ApplicationList notDeploying() { return listOf(list.stream().filter(application -> ! application.change().isPresent())); } /** Returns the subset of applications which currently does not have any failing jobs */ public ApplicationList notFailing() { return listOf(list.stream().filter(application -> ! application.deploymentJobs().hasFailures())); } /** Returns the subset of applications which currently have failing jobs */ public ApplicationList failing() { return listOf(list.stream().filter(application -> application.deploymentJobs().hasFailures())); } /** Returns the subset of applications which have been failing an upgrade to the given version since the given instant */ public ApplicationList failingUpgradeToVersionSince(Version version, Instant threshold) { return listOf(list.stream().filter(application -> failingUpgradeToVersionSince(application, version, threshold))); } /** Returns the subset of applications which have been failing an application change since the given instant */ public ApplicationList failingApplicationChangeSince(Instant threshold) { return listOf(list.stream().filter(application -> failingApplicationChangeSince(application, threshold))); } /** Returns the subset of applications which currently does not have any failing jobs on the given version */ public 
ApplicationList notFailingOn(Version version) { return listOf(list.stream().filter(application -> ! failingOn(version, application))); } /** Returns the subset of applications which have at least one production deployment */ public ApplicationList hasDeployment() { return listOf(list.stream().filter(a -> !a.productionDeployments().isEmpty())); } /** Returns the subset of applications which started failing on the given version */ public ApplicationList startedFailingOn(Version version) { return listOf(list.stream().filter(application -> ! JobList.from(application).firstFailing().on(version).isEmpty())); } /** Returns the subset of applications which has the given upgrade policy */ public ApplicationList with(UpgradePolicy policy) { return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() == policy)); } /** Returns the subset of applications which does not have the given upgrade policy */ public ApplicationList without(UpgradePolicy policy) { return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() != policy)); } /** Returns the subset of applications which have at least one deployment on a lower version than the given one */ public ApplicationList onLowerVersionThan(Version version) { return listOf(list.stream() .filter(a -> a.productionDeployments().values().stream() .anyMatch(d -> d.version().isBefore(version)))); } /** Returns the subset of applications which have a project ID */ public ApplicationList withProjectId() { return listOf(list.stream().filter(a -> a.deploymentJobs().projectId().isPresent())); } /** Returns the subset of applications which have at least one production deployment */ public ApplicationList hasProductionDeployment() { return listOf(list.stream().filter(a -> ! 
a.productionDeployments().isEmpty())); } /** Returns the subset of applications that are allowed to upgrade at the given time */ public ApplicationList canUpgradeAt(Instant instant) { return listOf(list.stream().filter(a -> a.deploymentSpec().canUpgradeAt(instant))); } /** * Returns the subset of applications that hasn't pinned to another major version than the given one. * * @param targetMajorVersion the target major version which applications returned allows upgrading to * @param defaultMajorVersion the default major version to assume for applications not specifying one */ /** Returns the first n application in this (or all, if there are less than n). */ public ApplicationList first(int n) { if (list.size() < n) return this; return new ApplicationList(list.subList(0, n)); } /** * Returns this list sorted by increasing deployed version. * If multiple versions are deployed the oldest is used. * Applications without any deployments are ordered first. */ public ApplicationList byIncreasingDeployedVersion() { return listOf(list.stream().sorted(Comparator.comparing(application -> application.oldestDeployedPlatform().orElse(Version.emptyVersion)))); } private static boolean isUpgradingTo(Version version, Application application) { return application.change().platform().equals(Optional.of(version)); } private static boolean failingOn(Version version, Application application) { return ! JobList.from(application) .failing() .lastCompleted().on(version) .isEmpty(); } private static boolean failingUpgradeToVersionSince(Application application, Version version, Instant threshold) { return ! JobList.from(application) .not().failingApplicationChange() .firstFailing().before(threshold) .lastCompleted().on(version) .isEmpty(); } private static boolean failingApplicationChangeSince(Application application, Instant threshold) { return ! 
JobList.from(application) .failingApplicationChange() .firstFailing().before(threshold) .isEmpty(); } /** Convenience converter from a stream to an ApplicationList */ private static ApplicationList listOf(Stream<Application> applications) { return from(applications::iterator); } }
class ApplicationList { private final ImmutableList<Application> list; private ApplicationList(Iterable<Application> applications) { this.list = ImmutableList.copyOf(applications); } public static ApplicationList from(Iterable<Application> applications) { return new ApplicationList(applications); } public static ApplicationList from(Collection<ApplicationId> ids, ApplicationController applications) { return listOf(ids.stream().map(applications::require)); } /** Returns the applications in this as an immutable list */ public List<Application> asList() { return list; } /** Returns the ids of the applications in this as an immutable list */ public List<ApplicationId> idList() { return ImmutableList.copyOf(list.stream().map(Application::id)::iterator); } public boolean isEmpty() { return list.isEmpty(); } public int size() { return list.size(); } /** Returns the subset of applications which are upgrading (to any version), not considering block windows. */ public ApplicationList upgrading() { return listOf(list.stream().filter(application -> application.change().platform().isPresent())); } /** Returns the subset of applications which are currently upgrading to the given version */ public ApplicationList upgradingTo(Version version) { return listOf(list.stream().filter(application -> isUpgradingTo(version, application))); } /** Returns the subset of applications which are not pinned to a certain Vespa version. */ public ApplicationList notPinned() { return listOf(list.stream().filter(application -> ! application.change().isPinned())); } /** Returns the subset of applications which are currently not upgrading to the given version */ public ApplicationList notUpgradingTo(Version version) { return listOf(list.stream().filter(application -> ! 
isUpgradingTo(version, application))); } /** * Returns the subset of applications which are currently not upgrading to the given version, * or returns all if no version is specified */ public ApplicationList notUpgradingTo(Optional<Version> version) { if ( ! version.isPresent()) return this; return notUpgradingTo(version.get()); } /** Returns the subset of applications which have changes left to deploy; blocked, or deploying */ public ApplicationList withChanges() { return listOf(list.stream().filter(application -> application.change().isPresent() || application.outstandingChange().isPresent())); } /** Returns the subset of applications which are currently not deploying a change */ public ApplicationList notDeploying() { return listOf(list.stream().filter(application -> ! application.change().isPresent())); } /** Returns the subset of applications which currently does not have any failing jobs */ public ApplicationList notFailing() { return listOf(list.stream().filter(application -> ! application.deploymentJobs().hasFailures())); } /** Returns the subset of applications which currently have failing jobs */ public ApplicationList failing() { return listOf(list.stream().filter(application -> application.deploymentJobs().hasFailures())); } /** Returns the subset of applications which have been failing an upgrade to the given version since the given instant */ public ApplicationList failingUpgradeToVersionSince(Version version, Instant threshold) { return listOf(list.stream().filter(application -> failingUpgradeToVersionSince(application, version, threshold))); } /** Returns the subset of applications which have been failing an application change since the given instant */ public ApplicationList failingApplicationChangeSince(Instant threshold) { return listOf(list.stream().filter(application -> failingApplicationChangeSince(application, threshold))); } /** Returns the subset of applications which currently does not have any failing jobs on the given version */ public 
ApplicationList notFailingOn(Version version) { return listOf(list.stream().filter(application -> ! failingOn(version, application))); } /** Returns the subset of applications which have at least one production deployment */ public ApplicationList hasDeployment() { return listOf(list.stream().filter(a -> !a.productionDeployments().isEmpty())); } /** Returns the subset of applications which started failing on the given version */ public ApplicationList startedFailingOn(Version version) { return listOf(list.stream().filter(application -> ! JobList.from(application).firstFailing().on(version).isEmpty())); } /** Returns the subset of applications which has the given upgrade policy */ public ApplicationList with(UpgradePolicy policy) { return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() == policy)); } /** Returns the subset of applications which does not have the given upgrade policy */ public ApplicationList without(UpgradePolicy policy) { return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() != policy)); } /** Returns the subset of applications which have at least one deployment on a lower version than the given one */ public ApplicationList onLowerVersionThan(Version version) { return listOf(list.stream() .filter(a -> a.productionDeployments().values().stream() .anyMatch(d -> d.version().isBefore(version)))); } /** Returns the subset of applications which have a project ID */ public ApplicationList withProjectId() { return listOf(list.stream().filter(a -> a.deploymentJobs().projectId().isPresent())); } /** Returns the subset of applications which have at least one production deployment */ public ApplicationList hasProductionDeployment() { return listOf(list.stream().filter(a -> ! 
a.productionDeployments().isEmpty())); } /** Returns the subset of applications that are allowed to upgrade at the given time */ public ApplicationList canUpgradeAt(Instant instant) { return listOf(list.stream().filter(a -> a.deploymentSpec().canUpgradeAt(instant))); } /** * Returns the subset of applications that hasn't pinned to another major version than the given one. * * @param targetMajorVersion the target major version which applications returned allows upgrading to * @param defaultMajorVersion the default major version to assume for applications not specifying one */ /** Returns the first n application in this (or all, if there are less than n). */ public ApplicationList first(int n) { if (list.size() < n) return this; return new ApplicationList(list.subList(0, n)); } /** * Returns this list sorted by increasing deployed version. * If multiple versions are deployed the oldest is used. * Applications without any deployments are ordered first. */ public ApplicationList byIncreasingDeployedVersion() { return listOf(list.stream().sorted(Comparator.comparing(application -> application.oldestDeployedPlatform().orElse(Version.emptyVersion)))); } private static boolean isUpgradingTo(Version version, Application application) { return application.change().platform().equals(Optional.of(version)); } private static boolean failingOn(Version version, Application application) { return ! JobList.from(application) .failing() .lastCompleted().on(version) .isEmpty(); } private static boolean failingUpgradeToVersionSince(Application application, Version version, Instant threshold) { return ! JobList.from(application) .not().failingApplicationChange() .firstFailing().before(threshold) .lastCompleted().on(version) .isEmpty(); } private static boolean failingApplicationChangeSince(Application application, Instant threshold) { return ! 
JobList.from(application) .failingApplicationChange() .firstFailing().before(threshold) .isEmpty(); } /** Convenience converter from a stream to an ApplicationList */ private static ApplicationList listOf(Stream<Application> applications) { return from(applications::iterator); } }
I think it's still correct
public ApplicationList allowMajorVersion(int targetMajorVersion, int defaultMajorVersion) { return listOf(list.stream().filter(a -> a.majorVersion().orElse(a.deploymentSpec().majorVersion().orElse(defaultMajorVersion)) >= targetMajorVersion)); }
>= targetMajorVersion));
public ApplicationList allowMajorVersion(int targetMajorVersion, int defaultMajorVersion) { return listOf(list.stream().filter(a -> a.majorVersion().orElse(a.deploymentSpec().majorVersion().orElse(defaultMajorVersion)) >= targetMajorVersion)); }
class ApplicationList { private final ImmutableList<Application> list; private ApplicationList(Iterable<Application> applications) { this.list = ImmutableList.copyOf(applications); } public static ApplicationList from(Iterable<Application> applications) { return new ApplicationList(applications); } public static ApplicationList from(Collection<ApplicationId> ids, ApplicationController applications) { return listOf(ids.stream().map(applications::require)); } /** Returns the applications in this as an immutable list */ public List<Application> asList() { return list; } /** Returns the ids of the applications in this as an immutable list */ public List<ApplicationId> idList() { return ImmutableList.copyOf(list.stream().map(Application::id)::iterator); } public boolean isEmpty() { return list.isEmpty(); } public int size() { return list.size(); } /** Returns the subset of applications which are upgrading (to any version), not considering block windows. */ public ApplicationList upgrading() { return listOf(list.stream().filter(application -> application.change().platform().isPresent())); } /** Returns the subset of applications which are currently upgrading to the given version */ public ApplicationList upgradingTo(Version version) { return listOf(list.stream().filter(application -> isUpgradingTo(version, application))); } /** Returns the subset of applications which are not pinned to a certain Vespa version. */ public ApplicationList notPinned() { return listOf(list.stream().filter(application -> ! application.change().isPinned())); } /** Returns the subset of applications which are currently not upgrading to the given version */ public ApplicationList notUpgradingTo(Version version) { return listOf(list.stream().filter(application -> ! 
isUpgradingTo(version, application))); } /** * Returns the subset of applications which are currently not upgrading to the given version, * or returns all if no version is specified */ public ApplicationList notUpgradingTo(Optional<Version> version) { if ( ! version.isPresent()) return this; return notUpgradingTo(version.get()); } /** Returns the subset of applications which have changes left to deploy; blocked, or deploying */ public ApplicationList withChanges() { return listOf(list.stream().filter(application -> application.change().isPresent() || application.outstandingChange().isPresent())); } /** Returns the subset of applications which are currently not deploying a change */ public ApplicationList notDeploying() { return listOf(list.stream().filter(application -> ! application.change().isPresent())); } /** Returns the subset of applications which currently does not have any failing jobs */ public ApplicationList notFailing() { return listOf(list.stream().filter(application -> ! application.deploymentJobs().hasFailures())); } /** Returns the subset of applications which currently have failing jobs */ public ApplicationList failing() { return listOf(list.stream().filter(application -> application.deploymentJobs().hasFailures())); } /** Returns the subset of applications which have been failing an upgrade to the given version since the given instant */ public ApplicationList failingUpgradeToVersionSince(Version version, Instant threshold) { return listOf(list.stream().filter(application -> failingUpgradeToVersionSince(application, version, threshold))); } /** Returns the subset of applications which have been failing an application change since the given instant */ public ApplicationList failingApplicationChangeSince(Instant threshold) { return listOf(list.stream().filter(application -> failingApplicationChangeSince(application, threshold))); } /** Returns the subset of applications which currently does not have any failing jobs on the given version */ public 
ApplicationList notFailingOn(Version version) { return listOf(list.stream().filter(application -> ! failingOn(version, application))); } /** Returns the subset of applications which have at least one production deployment */ public ApplicationList hasDeployment() { return listOf(list.stream().filter(a -> !a.productionDeployments().isEmpty())); } /** Returns the subset of applications which started failing on the given version */ public ApplicationList startedFailingOn(Version version) { return listOf(list.stream().filter(application -> ! JobList.from(application).firstFailing().on(version).isEmpty())); } /** Returns the subset of applications which has the given upgrade policy */ public ApplicationList with(UpgradePolicy policy) { return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() == policy)); } /** Returns the subset of applications which does not have the given upgrade policy */ public ApplicationList without(UpgradePolicy policy) { return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() != policy)); } /** Returns the subset of applications which have at least one deployment on a lower version than the given one */ public ApplicationList onLowerVersionThan(Version version) { return listOf(list.stream() .filter(a -> a.productionDeployments().values().stream() .anyMatch(d -> d.version().isBefore(version)))); } /** Returns the subset of applications which have a project ID */ public ApplicationList withProjectId() { return listOf(list.stream().filter(a -> a.deploymentJobs().projectId().isPresent())); } /** Returns the subset of applications which have at least one production deployment */ public ApplicationList hasProductionDeployment() { return listOf(list.stream().filter(a -> ! 
a.productionDeployments().isEmpty())); } /** Returns the subset of applications that are allowed to upgrade at the given time */ public ApplicationList canUpgradeAt(Instant instant) { return listOf(list.stream().filter(a -> a.deploymentSpec().canUpgradeAt(instant))); } /** * Returns the subset of applications that hasn't pinned to another major version than the given one. * * @param targetMajorVersion the target major version which applications returned allows upgrading to * @param defaultMajorVersion the default major version to assume for applications not specifying one */ /** Returns the first n application in this (or all, if there are less than n). */ public ApplicationList first(int n) { if (list.size() < n) return this; return new ApplicationList(list.subList(0, n)); } /** * Returns this list sorted by increasing deployed version. * If multiple versions are deployed the oldest is used. * Applications without any deployments are ordered first. */ public ApplicationList byIncreasingDeployedVersion() { return listOf(list.stream().sorted(Comparator.comparing(application -> application.oldestDeployedPlatform().orElse(Version.emptyVersion)))); } private static boolean isUpgradingTo(Version version, Application application) { return application.change().platform().equals(Optional.of(version)); } private static boolean failingOn(Version version, Application application) { return ! JobList.from(application) .failing() .lastCompleted().on(version) .isEmpty(); } private static boolean failingUpgradeToVersionSince(Application application, Version version, Instant threshold) { return ! JobList.from(application) .not().failingApplicationChange() .firstFailing().before(threshold) .lastCompleted().on(version) .isEmpty(); } private static boolean failingApplicationChangeSince(Application application, Instant threshold) { return ! 
JobList.from(application) .failingApplicationChange() .firstFailing().before(threshold) .isEmpty(); } /** Convenience converter from a stream to an ApplicationList */ private static ApplicationList listOf(Stream<Application> applications) { return from(applications::iterator); } }
class ApplicationList { private final ImmutableList<Application> list; private ApplicationList(Iterable<Application> applications) { this.list = ImmutableList.copyOf(applications); } public static ApplicationList from(Iterable<Application> applications) { return new ApplicationList(applications); } public static ApplicationList from(Collection<ApplicationId> ids, ApplicationController applications) { return listOf(ids.stream().map(applications::require)); } /** Returns the applications in this as an immutable list */ public List<Application> asList() { return list; } /** Returns the ids of the applications in this as an immutable list */ public List<ApplicationId> idList() { return ImmutableList.copyOf(list.stream().map(Application::id)::iterator); } public boolean isEmpty() { return list.isEmpty(); } public int size() { return list.size(); } /** Returns the subset of applications which are upgrading (to any version), not considering block windows. */ public ApplicationList upgrading() { return listOf(list.stream().filter(application -> application.change().platform().isPresent())); } /** Returns the subset of applications which are currently upgrading to the given version */ public ApplicationList upgradingTo(Version version) { return listOf(list.stream().filter(application -> isUpgradingTo(version, application))); } /** Returns the subset of applications which are not pinned to a certain Vespa version. */ public ApplicationList notPinned() { return listOf(list.stream().filter(application -> ! application.change().isPinned())); } /** Returns the subset of applications which are currently not upgrading to the given version */ public ApplicationList notUpgradingTo(Version version) { return listOf(list.stream().filter(application -> ! 
isUpgradingTo(version, application))); } /** * Returns the subset of applications which are currently not upgrading to the given version, * or returns all if no version is specified */ public ApplicationList notUpgradingTo(Optional<Version> version) { if ( ! version.isPresent()) return this; return notUpgradingTo(version.get()); } /** Returns the subset of applications which have changes left to deploy; blocked, or deploying */ public ApplicationList withChanges() { return listOf(list.stream().filter(application -> application.change().isPresent() || application.outstandingChange().isPresent())); } /** Returns the subset of applications which are currently not deploying a change */ public ApplicationList notDeploying() { return listOf(list.stream().filter(application -> ! application.change().isPresent())); } /** Returns the subset of applications which currently does not have any failing jobs */ public ApplicationList notFailing() { return listOf(list.stream().filter(application -> ! application.deploymentJobs().hasFailures())); } /** Returns the subset of applications which currently have failing jobs */ public ApplicationList failing() { return listOf(list.stream().filter(application -> application.deploymentJobs().hasFailures())); } /** Returns the subset of applications which have been failing an upgrade to the given version since the given instant */ public ApplicationList failingUpgradeToVersionSince(Version version, Instant threshold) { return listOf(list.stream().filter(application -> failingUpgradeToVersionSince(application, version, threshold))); } /** Returns the subset of applications which have been failing an application change since the given instant */ public ApplicationList failingApplicationChangeSince(Instant threshold) { return listOf(list.stream().filter(application -> failingApplicationChangeSince(application, threshold))); } /** Returns the subset of applications which currently does not have any failing jobs on the given version */ public 
ApplicationList notFailingOn(Version version) { return listOf(list.stream().filter(application -> ! failingOn(version, application))); } /** Returns the subset of applications which have at least one production deployment */ public ApplicationList hasDeployment() { return listOf(list.stream().filter(a -> !a.productionDeployments().isEmpty())); } /** Returns the subset of applications which started failing on the given version */ public ApplicationList startedFailingOn(Version version) { return listOf(list.stream().filter(application -> ! JobList.from(application).firstFailing().on(version).isEmpty())); } /** Returns the subset of applications which has the given upgrade policy */ public ApplicationList with(UpgradePolicy policy) { return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() == policy)); } /** Returns the subset of applications which does not have the given upgrade policy */ public ApplicationList without(UpgradePolicy policy) { return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() != policy)); } /** Returns the subset of applications which have at least one deployment on a lower version than the given one */ public ApplicationList onLowerVersionThan(Version version) { return listOf(list.stream() .filter(a -> a.productionDeployments().values().stream() .anyMatch(d -> d.version().isBefore(version)))); } /** Returns the subset of applications which have a project ID */ public ApplicationList withProjectId() { return listOf(list.stream().filter(a -> a.deploymentJobs().projectId().isPresent())); } /** Returns the subset of applications which have at least one production deployment */ public ApplicationList hasProductionDeployment() { return listOf(list.stream().filter(a -> ! 
a.productionDeployments().isEmpty())); } /** Returns the subset of applications that are allowed to upgrade at the given time */ public ApplicationList canUpgradeAt(Instant instant) { return listOf(list.stream().filter(a -> a.deploymentSpec().canUpgradeAt(instant))); } /** * Returns the subset of applications that hasn't pinned to another major version than the given one. * * @param targetMajorVersion the target major version which applications returned allows upgrading to * @param defaultMajorVersion the default major version to assume for applications not specifying one */ /** Returns the first n application in this (or all, if there are less than n). */ public ApplicationList first(int n) { if (list.size() < n) return this; return new ApplicationList(list.subList(0, n)); } /** * Returns this list sorted by increasing deployed version. * If multiple versions are deployed the oldest is used. * Applications without any deployments are ordered first. */ public ApplicationList byIncreasingDeployedVersion() { return listOf(list.stream().sorted(Comparator.comparing(application -> application.oldestDeployedPlatform().orElse(Version.emptyVersion)))); } private static boolean isUpgradingTo(Version version, Application application) { return application.change().platform().equals(Optional.of(version)); } private static boolean failingOn(Version version, Application application) { return ! JobList.from(application) .failing() .lastCompleted().on(version) .isEmpty(); } private static boolean failingUpgradeToVersionSince(Application application, Version version, Instant threshold) { return ! JobList.from(application) .not().failingApplicationChange() .firstFailing().before(threshold) .lastCompleted().on(version) .isEmpty(); } private static boolean failingApplicationChangeSince(Application application, Instant threshold) { return ! 
JobList.from(application) .failingApplicationChange() .firstFailing().before(threshold) .isEmpty(); } /** Convenience converter from a stream to an ApplicationList */ private static ApplicationList listOf(Stream<Application> applications) { return from(applications::iterator); } }
Right, I forgot I edited this comment independently of the semantic change. I'll correct it back.
public ApplicationList allowMajorVersion(int targetMajorVersion, int defaultMajorVersion) { return listOf(list.stream().filter(a -> a.majorVersion().orElse(a.deploymentSpec().majorVersion().orElse(defaultMajorVersion)) >= targetMajorVersion)); }
>= targetMajorVersion));
public ApplicationList allowMajorVersion(int targetMajorVersion, int defaultMajorVersion) { return listOf(list.stream().filter(a -> a.majorVersion().orElse(a.deploymentSpec().majorVersion().orElse(defaultMajorVersion)) >= targetMajorVersion)); }
class ApplicationList { private final ImmutableList<Application> list; private ApplicationList(Iterable<Application> applications) { this.list = ImmutableList.copyOf(applications); } public static ApplicationList from(Iterable<Application> applications) { return new ApplicationList(applications); } public static ApplicationList from(Collection<ApplicationId> ids, ApplicationController applications) { return listOf(ids.stream().map(applications::require)); } /** Returns the applications in this as an immutable list */ public List<Application> asList() { return list; } /** Returns the ids of the applications in this as an immutable list */ public List<ApplicationId> idList() { return ImmutableList.copyOf(list.stream().map(Application::id)::iterator); } public boolean isEmpty() { return list.isEmpty(); } public int size() { return list.size(); } /** Returns the subset of applications which are upgrading (to any version), not considering block windows. */ public ApplicationList upgrading() { return listOf(list.stream().filter(application -> application.change().platform().isPresent())); } /** Returns the subset of applications which are currently upgrading to the given version */ public ApplicationList upgradingTo(Version version) { return listOf(list.stream().filter(application -> isUpgradingTo(version, application))); } /** Returns the subset of applications which are not pinned to a certain Vespa version. */ public ApplicationList notPinned() { return listOf(list.stream().filter(application -> ! application.change().isPinned())); } /** Returns the subset of applications which are currently not upgrading to the given version */ public ApplicationList notUpgradingTo(Version version) { return listOf(list.stream().filter(application -> ! 
isUpgradingTo(version, application))); } /** * Returns the subset of applications which are currently not upgrading to the given version, * or returns all if no version is specified */ public ApplicationList notUpgradingTo(Optional<Version> version) { if ( ! version.isPresent()) return this; return notUpgradingTo(version.get()); } /** Returns the subset of applications which have changes left to deploy; blocked, or deploying */ public ApplicationList withChanges() { return listOf(list.stream().filter(application -> application.change().isPresent() || application.outstandingChange().isPresent())); } /** Returns the subset of applications which are currently not deploying a change */ public ApplicationList notDeploying() { return listOf(list.stream().filter(application -> ! application.change().isPresent())); } /** Returns the subset of applications which currently does not have any failing jobs */ public ApplicationList notFailing() { return listOf(list.stream().filter(application -> ! application.deploymentJobs().hasFailures())); } /** Returns the subset of applications which currently have failing jobs */ public ApplicationList failing() { return listOf(list.stream().filter(application -> application.deploymentJobs().hasFailures())); } /** Returns the subset of applications which have been failing an upgrade to the given version since the given instant */ public ApplicationList failingUpgradeToVersionSince(Version version, Instant threshold) { return listOf(list.stream().filter(application -> failingUpgradeToVersionSince(application, version, threshold))); } /** Returns the subset of applications which have been failing an application change since the given instant */ public ApplicationList failingApplicationChangeSince(Instant threshold) { return listOf(list.stream().filter(application -> failingApplicationChangeSince(application, threshold))); } /** Returns the subset of applications which currently does not have any failing jobs on the given version */ public 
ApplicationList notFailingOn(Version version) { return listOf(list.stream().filter(application -> ! failingOn(version, application))); } /** Returns the subset of applications which have at least one production deployment */ public ApplicationList hasDeployment() { return listOf(list.stream().filter(a -> !a.productionDeployments().isEmpty())); } /** Returns the subset of applications which started failing on the given version */ public ApplicationList startedFailingOn(Version version) { return listOf(list.stream().filter(application -> ! JobList.from(application).firstFailing().on(version).isEmpty())); } /** Returns the subset of applications which has the given upgrade policy */ public ApplicationList with(UpgradePolicy policy) { return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() == policy)); } /** Returns the subset of applications which does not have the given upgrade policy */ public ApplicationList without(UpgradePolicy policy) { return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() != policy)); } /** Returns the subset of applications which have at least one deployment on a lower version than the given one */ public ApplicationList onLowerVersionThan(Version version) { return listOf(list.stream() .filter(a -> a.productionDeployments().values().stream() .anyMatch(d -> d.version().isBefore(version)))); } /** Returns the subset of applications which have a project ID */ public ApplicationList withProjectId() { return listOf(list.stream().filter(a -> a.deploymentJobs().projectId().isPresent())); } /** Returns the subset of applications which have at least one production deployment */ public ApplicationList hasProductionDeployment() { return listOf(list.stream().filter(a -> ! 
a.productionDeployments().isEmpty())); } /** Returns the subset of applications that are allowed to upgrade at the given time */ public ApplicationList canUpgradeAt(Instant instant) { return listOf(list.stream().filter(a -> a.deploymentSpec().canUpgradeAt(instant))); } /** * Returns the subset of applications that hasn't pinned to another major version than the given one. * * @param targetMajorVersion the target major version which applications returned allows upgrading to * @param defaultMajorVersion the default major version to assume for applications not specifying one */ /** Returns the first n application in this (or all, if there are less than n). */ public ApplicationList first(int n) { if (list.size() < n) return this; return new ApplicationList(list.subList(0, n)); } /** * Returns this list sorted by increasing deployed version. * If multiple versions are deployed the oldest is used. * Applications without any deployments are ordered first. */ public ApplicationList byIncreasingDeployedVersion() { return listOf(list.stream().sorted(Comparator.comparing(application -> application.oldestDeployedPlatform().orElse(Version.emptyVersion)))); } private static boolean isUpgradingTo(Version version, Application application) { return application.change().platform().equals(Optional.of(version)); } private static boolean failingOn(Version version, Application application) { return ! JobList.from(application) .failing() .lastCompleted().on(version) .isEmpty(); } private static boolean failingUpgradeToVersionSince(Application application, Version version, Instant threshold) { return ! JobList.from(application) .not().failingApplicationChange() .firstFailing().before(threshold) .lastCompleted().on(version) .isEmpty(); } private static boolean failingApplicationChangeSince(Application application, Instant threshold) { return ! 
JobList.from(application) .failingApplicationChange() .firstFailing().before(threshold) .isEmpty(); } /** Convenience converter from a stream to an ApplicationList */ private static ApplicationList listOf(Stream<Application> applications) { return from(applications::iterator); } }
class ApplicationList { private final ImmutableList<Application> list; private ApplicationList(Iterable<Application> applications) { this.list = ImmutableList.copyOf(applications); } public static ApplicationList from(Iterable<Application> applications) { return new ApplicationList(applications); } public static ApplicationList from(Collection<ApplicationId> ids, ApplicationController applications) { return listOf(ids.stream().map(applications::require)); } /** Returns the applications in this as an immutable list */ public List<Application> asList() { return list; } /** Returns the ids of the applications in this as an immutable list */ public List<ApplicationId> idList() { return ImmutableList.copyOf(list.stream().map(Application::id)::iterator); } public boolean isEmpty() { return list.isEmpty(); } public int size() { return list.size(); } /** Returns the subset of applications which are upgrading (to any version), not considering block windows. */ public ApplicationList upgrading() { return listOf(list.stream().filter(application -> application.change().platform().isPresent())); } /** Returns the subset of applications which are currently upgrading to the given version */ public ApplicationList upgradingTo(Version version) { return listOf(list.stream().filter(application -> isUpgradingTo(version, application))); } /** Returns the subset of applications which are not pinned to a certain Vespa version. */ public ApplicationList notPinned() { return listOf(list.stream().filter(application -> ! application.change().isPinned())); } /** Returns the subset of applications which are currently not upgrading to the given version */ public ApplicationList notUpgradingTo(Version version) { return listOf(list.stream().filter(application -> ! 
isUpgradingTo(version, application))); } /** * Returns the subset of applications which are currently not upgrading to the given version, * or returns all if no version is specified */ public ApplicationList notUpgradingTo(Optional<Version> version) { if ( ! version.isPresent()) return this; return notUpgradingTo(version.get()); } /** Returns the subset of applications which have changes left to deploy; blocked, or deploying */ public ApplicationList withChanges() { return listOf(list.stream().filter(application -> application.change().isPresent() || application.outstandingChange().isPresent())); } /** Returns the subset of applications which are currently not deploying a change */ public ApplicationList notDeploying() { return listOf(list.stream().filter(application -> ! application.change().isPresent())); } /** Returns the subset of applications which currently does not have any failing jobs */ public ApplicationList notFailing() { return listOf(list.stream().filter(application -> ! application.deploymentJobs().hasFailures())); } /** Returns the subset of applications which currently have failing jobs */ public ApplicationList failing() { return listOf(list.stream().filter(application -> application.deploymentJobs().hasFailures())); } /** Returns the subset of applications which have been failing an upgrade to the given version since the given instant */ public ApplicationList failingUpgradeToVersionSince(Version version, Instant threshold) { return listOf(list.stream().filter(application -> failingUpgradeToVersionSince(application, version, threshold))); } /** Returns the subset of applications which have been failing an application change since the given instant */ public ApplicationList failingApplicationChangeSince(Instant threshold) { return listOf(list.stream().filter(application -> failingApplicationChangeSince(application, threshold))); } /** Returns the subset of applications which currently does not have any failing jobs on the given version */ public 
ApplicationList notFailingOn(Version version) { return listOf(list.stream().filter(application -> ! failingOn(version, application))); } /** Returns the subset of applications which have at least one production deployment */ public ApplicationList hasDeployment() { return listOf(list.stream().filter(a -> !a.productionDeployments().isEmpty())); } /** Returns the subset of applications which started failing on the given version */ public ApplicationList startedFailingOn(Version version) { return listOf(list.stream().filter(application -> ! JobList.from(application).firstFailing().on(version).isEmpty())); } /** Returns the subset of applications which has the given upgrade policy */ public ApplicationList with(UpgradePolicy policy) { return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() == policy)); } /** Returns the subset of applications which does not have the given upgrade policy */ public ApplicationList without(UpgradePolicy policy) { return listOf(list.stream().filter(a -> a.deploymentSpec().upgradePolicy() != policy)); } /** Returns the subset of applications which have at least one deployment on a lower version than the given one */ public ApplicationList onLowerVersionThan(Version version) { return listOf(list.stream() .filter(a -> a.productionDeployments().values().stream() .anyMatch(d -> d.version().isBefore(version)))); } /** Returns the subset of applications which have a project ID */ public ApplicationList withProjectId() { return listOf(list.stream().filter(a -> a.deploymentJobs().projectId().isPresent())); } /** Returns the subset of applications which have at least one production deployment */ public ApplicationList hasProductionDeployment() { return listOf(list.stream().filter(a -> ! 
a.productionDeployments().isEmpty())); } /** Returns the subset of applications that are allowed to upgrade at the given time */ public ApplicationList canUpgradeAt(Instant instant) { return listOf(list.stream().filter(a -> a.deploymentSpec().canUpgradeAt(instant))); } /** * Returns the subset of applications that hasn't pinned to another major version than the given one. * * @param targetMajorVersion the target major version which applications returned allows upgrading to * @param defaultMajorVersion the default major version to assume for applications not specifying one */ /** Returns the first n application in this (or all, if there are less than n). */ public ApplicationList first(int n) { if (list.size() < n) return this; return new ApplicationList(list.subList(0, n)); } /** * Returns this list sorted by increasing deployed version. * If multiple versions are deployed the oldest is used. * Applications without any deployments are ordered first. */ public ApplicationList byIncreasingDeployedVersion() { return listOf(list.stream().sorted(Comparator.comparing(application -> application.oldestDeployedPlatform().orElse(Version.emptyVersion)))); } private static boolean isUpgradingTo(Version version, Application application) { return application.change().platform().equals(Optional.of(version)); } private static boolean failingOn(Version version, Application application) { return ! JobList.from(application) .failing() .lastCompleted().on(version) .isEmpty(); } private static boolean failingUpgradeToVersionSince(Application application, Version version, Instant threshold) { return ! JobList.from(application) .not().failingApplicationChange() .firstFailing().before(threshold) .lastCompleted().on(version) .isEmpty(); } private static boolean failingApplicationChangeSince(Application application, Instant threshold) { return ! 
JobList.from(application) .failingApplicationChange() .firstFailing().before(threshold) .isEmpty(); } /** Convenience converter from a stream to an ApplicationList */ private static ApplicationList listOf(Stream<Application> applications) { return from(applications::iterator); } }
This will return HTTP-200 with body `{}` if no such flag exists?
public void render(OutputStream outputStream) throws IOException { ObjectNode rootNode = mapper.createObjectNode(); flagDefinition.ifPresent(definition -> renderFlagDefinition(definition, rootNode)); mapper.writeValue(outputStream, rootNode); }
flagDefinition.ifPresent(definition -> renderFlagDefinition(definition, rootNode));
public void render(OutputStream outputStream) throws IOException { ObjectNode rootNode = mapper.createObjectNode(); renderFlagDefinition(flagDefinition, rootNode); mapper.writeValue(outputStream, rootNode); }
class DefinedFlag extends HttpResponse { private static ObjectMapper mapper = new ObjectMapper(); private final Optional<FlagDefinition> flagDefinition; public DefinedFlag(Optional<FlagDefinition> flagDefinition) { super(Response.Status.OK); this.flagDefinition = flagDefinition; } @Override static void renderFlagDefinition(FlagDefinition flagDefinition, ObjectNode definitionNode) { definitionNode.put("description", flagDefinition.getDescription()); definitionNode.put("modification-effect", flagDefinition.getModificationEffect()); ArrayNode dimensionsNode = definitionNode.putArray("dimensions"); flagDefinition.getDimensions().forEach(dimension -> dimensionsNode.add(DimensionHelper.toWire(dimension))); } @Override public String getContentType() { return HttpConfigResponse.JSON_CONTENT_TYPE; } }
class DefinedFlag extends HttpResponse { private static ObjectMapper mapper = new ObjectMapper(); private final FlagDefinition flagDefinition; public DefinedFlag(FlagDefinition flagDefinition) { super(Response.Status.OK); this.flagDefinition = flagDefinition; } @Override static void renderFlagDefinition(FlagDefinition flagDefinition, ObjectNode definitionNode) { definitionNode.put("description", flagDefinition.getDescription()); definitionNode.put("modification-effect", flagDefinition.getModificationEffect()); ArrayNode dimensionsNode = definitionNode.putArray("dimensions"); flagDefinition.getDimensions().forEach(dimension -> dimensionsNode.add(DimensionHelper.toWire(dimension))); } @Override public String getContentType() { return HttpConfigResponse.JSON_CONTENT_TYPE; } }
Ah, we want 404 in that case, hold on
public void render(OutputStream outputStream) throws IOException { ObjectNode rootNode = mapper.createObjectNode(); flagDefinition.ifPresent(definition -> renderFlagDefinition(definition, rootNode)); mapper.writeValue(outputStream, rootNode); }
flagDefinition.ifPresent(definition -> renderFlagDefinition(definition, rootNode));
public void render(OutputStream outputStream) throws IOException { ObjectNode rootNode = mapper.createObjectNode(); renderFlagDefinition(flagDefinition, rootNode); mapper.writeValue(outputStream, rootNode); }
class DefinedFlag extends HttpResponse { private static ObjectMapper mapper = new ObjectMapper(); private final Optional<FlagDefinition> flagDefinition; public DefinedFlag(Optional<FlagDefinition> flagDefinition) { super(Response.Status.OK); this.flagDefinition = flagDefinition; } @Override static void renderFlagDefinition(FlagDefinition flagDefinition, ObjectNode definitionNode) { definitionNode.put("description", flagDefinition.getDescription()); definitionNode.put("modification-effect", flagDefinition.getModificationEffect()); ArrayNode dimensionsNode = definitionNode.putArray("dimensions"); flagDefinition.getDimensions().forEach(dimension -> dimensionsNode.add(DimensionHelper.toWire(dimension))); } @Override public String getContentType() { return HttpConfigResponse.JSON_CONTENT_TYPE; } }
class DefinedFlag extends HttpResponse { private static ObjectMapper mapper = new ObjectMapper(); private final FlagDefinition flagDefinition; public DefinedFlag(FlagDefinition flagDefinition) { super(Response.Status.OK); this.flagDefinition = flagDefinition; } @Override static void renderFlagDefinition(FlagDefinition flagDefinition, ObjectNode definitionNode) { definitionNode.put("description", flagDefinition.getDescription()); definitionNode.put("modification-effect", flagDefinition.getModificationEffect()); ArrayNode dimensionsNode = definitionNode.putArray("dimensions"); flagDefinition.getDimensions().forEach(dimension -> dimensionsNode.add(DimensionHelper.toWire(dimension))); } @Override public String getContentType() { return HttpConfigResponse.JSON_CONTENT_TYPE; } }
Fixed
public void render(OutputStream outputStream) throws IOException { ObjectNode rootNode = mapper.createObjectNode(); flagDefinition.ifPresent(definition -> renderFlagDefinition(definition, rootNode)); mapper.writeValue(outputStream, rootNode); }
flagDefinition.ifPresent(definition -> renderFlagDefinition(definition, rootNode));
public void render(OutputStream outputStream) throws IOException { ObjectNode rootNode = mapper.createObjectNode(); renderFlagDefinition(flagDefinition, rootNode); mapper.writeValue(outputStream, rootNode); }
class DefinedFlag extends HttpResponse { private static ObjectMapper mapper = new ObjectMapper(); private final Optional<FlagDefinition> flagDefinition; public DefinedFlag(Optional<FlagDefinition> flagDefinition) { super(Response.Status.OK); this.flagDefinition = flagDefinition; } @Override static void renderFlagDefinition(FlagDefinition flagDefinition, ObjectNode definitionNode) { definitionNode.put("description", flagDefinition.getDescription()); definitionNode.put("modification-effect", flagDefinition.getModificationEffect()); ArrayNode dimensionsNode = definitionNode.putArray("dimensions"); flagDefinition.getDimensions().forEach(dimension -> dimensionsNode.add(DimensionHelper.toWire(dimension))); } @Override public String getContentType() { return HttpConfigResponse.JSON_CONTENT_TYPE; } }
class DefinedFlag extends HttpResponse { private static ObjectMapper mapper = new ObjectMapper(); private final FlagDefinition flagDefinition; public DefinedFlag(FlagDefinition flagDefinition) { super(Response.Status.OK); this.flagDefinition = flagDefinition; } @Override static void renderFlagDefinition(FlagDefinition flagDefinition, ObjectNode definitionNode) { definitionNode.put("description", flagDefinition.getDescription()); definitionNode.put("modification-effect", flagDefinition.getModificationEffect()); ArrayNode dimensionsNode = definitionNode.putArray("dimensions"); flagDefinition.getDimensions().forEach(dimension -> dimensionsNode.add(DimensionHelper.toWire(dimension))); } @Override public String getContentType() { return HttpConfigResponse.JSON_CONTENT_TYPE; } }
Why not use a stream here? It would be a one-liner ...
public int workingNodes() { int nodesUp = 0; for (Node node : nodes) { if (node.isWorking()) { nodesUp++; } } return nodesUp; }
int nodesUp = 0;
public int workingNodes() { int nodesUp = 0; for (Node node : nodes) { if (node.isWorking()) { nodesUp++; } } return nodesUp; }
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that is should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } void aggregateActiveDocuments() { long activeDocumentsInGroup = 0; for (Node node : nodes) { if (node.isWorking()) { activeDocumentsInGroup += node.getActiveDocuments(); } } activeDocuments.set(activeDocumentsInGroup); } /** Returns the active documents on this node. If unknown, 0 is returned. */ long getActiveDocuments() { return this.activeDocuments.get(); } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that is should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } void aggregateActiveDocuments() { long activeDocumentsInGroup = 0; for (Node node : nodes) { if (node.isWorking()) { activeDocumentsInGroup += node.getActiveDocuments(); } } activeDocuments.set(activeDocumentsInGroup); } /** Returns the active documents on this node. If unknown, 0 is returned. */ long getActiveDocuments() { return this.activeDocuments.get(); } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
You're absolutely right. I've been looking at too many stack traces containing lambdas lately so I've unconsciously avoided writing them in code, even when (as in this case) you'd never expect to see it in a trace. And the code would be at least as readable as the loopy version.
public int workingNodes() { int nodesUp = 0; for (Node node : nodes) { if (node.isWorking()) { nodesUp++; } } return nodesUp; }
int nodesUp = 0;
public int workingNodes() { int nodesUp = 0; for (Node node : nodes) { if (node.isWorking()) { nodesUp++; } } return nodesUp; }
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that is should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } void aggregateActiveDocuments() { long activeDocumentsInGroup = 0; for (Node node : nodes) { if (node.isWorking()) { activeDocumentsInGroup += node.getActiveDocuments(); } } activeDocuments.set(activeDocumentsInGroup); } /** Returns the active documents on this node. If unknown, 0 is returned. */ long getActiveDocuments() { return this.activeDocuments.get(); } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that is should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } void aggregateActiveDocuments() { long activeDocumentsInGroup = 0; for (Node node : nodes) { if (node.isWorking()) { activeDocumentsInGroup += node.getActiveDocuments(); } } activeDocuments.set(activeDocumentsInGroup); } /** Returns the active documents on this node. If unknown, 0 is returned. */ long getActiveDocuments() { return this.activeDocuments.get(); } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } }
Document the meaning of this
private CreateContainerCmd createCreateContainerCmd() { List<Bind> volumeBinds = volumeBindSpecs.stream().map(Bind::parse).collect(Collectors.toList()); final HostConfig hostConfig = new HostConfig(); containerResources.ifPresent(cr -> hostConfig .withCpuShares(cr.cpuShares()) .withMemory(cr.memoryBytes()) .withMemorySwap(cr.memoryBytes()) .withCpuPeriod(cr.cpuQuota() > 0 ? cr.cpuPeriod() : null) .withCpuQuota(cr.cpuQuota() > 0 ? cr.cpuQuota() : null)); final CreateContainerCmd containerCmd = docker .createContainerCmd(dockerImage.asString()) .withHostConfig(hostConfig) .withName(containerName.asString()) .withLabels(labels) .withEnv(environmentAssignments) .withBinds(volumeBinds) .withUlimits(ulimits) .withCapAdd(new ArrayList<>(addCapabilities)) .withCapDrop(new ArrayList<>(dropCapabilities)) .withPrivileged(privileged); networkMode .filter(mode -> ! mode.toLowerCase().equals("host")) .ifPresent(mode -> containerCmd.withMacAddress(generateMACAddress(hostName, ipv4Address, ipv6Address))); hostName.ifPresent(containerCmd::withHostName); networkMode.ifPresent(containerCmd::withNetworkMode); ipv4Address.ifPresent(containerCmd::withIpv4Address); ipv6Address.ifPresent(containerCmd::withIpv6Address); entrypoint.ifPresent(containerCmd::withEntrypoint); return containerCmd; }
.withMemorySwap(cr.memoryBytes())
private CreateContainerCmd createCreateContainerCmd() { List<Bind> volumeBinds = volumeBindSpecs.stream().map(Bind::parse).collect(Collectors.toList()); final HostConfig hostConfig = new HostConfig(); containerResources.ifPresent(cr -> hostConfig .withCpuShares(cr.cpuShares()) .withMemory(cr.memoryBytes()) .withMemorySwap(cr.memoryBytes()) .withCpuPeriod(cr.cpuQuota() > 0 ? cr.cpuPeriod() : null) .withCpuQuota(cr.cpuQuota() > 0 ? cr.cpuQuota() : null)); final CreateContainerCmd containerCmd = docker .createContainerCmd(dockerImage.asString()) .withHostConfig(hostConfig) .withName(containerName.asString()) .withLabels(labels) .withEnv(environmentAssignments) .withBinds(volumeBinds) .withUlimits(ulimits) .withCapAdd(new ArrayList<>(addCapabilities)) .withCapDrop(new ArrayList<>(dropCapabilities)) .withPrivileged(privileged); networkMode .filter(mode -> ! mode.toLowerCase().equals("host")) .ifPresent(mode -> containerCmd.withMacAddress(generateMACAddress(hostName, ipv4Address, ipv6Address))); hostName.ifPresent(containerCmd::withHostName); networkMode.ifPresent(containerCmd::withNetworkMode); ipv4Address.ifPresent(containerCmd::withIpv4Address); ipv6Address.ifPresent(containerCmd::withIpv6Address); entrypoint.ifPresent(containerCmd::withEntrypoint); return containerCmd; }
class CreateContainerCommandImpl implements Docker.CreateContainerCommand { private final DockerClient docker; private final DockerImage dockerImage; private final ContainerName containerName; private final Map<String, String> labels = new HashMap<>(); private final List<String> environmentAssignments = new ArrayList<>(); private final List<String> volumeBindSpecs = new ArrayList<>(); private final List<Ulimit> ulimits = new ArrayList<>(); private final Set<Capability> addCapabilities = new HashSet<>(); private final Set<Capability> dropCapabilities = new HashSet<>(); private Optional<String> hostName = Optional.empty(); private Optional<ContainerResources> containerResources = Optional.empty(); private Optional<String> networkMode = Optional.empty(); private Optional<String> ipv4Address = Optional.empty(); private Optional<String> ipv6Address = Optional.empty(); private Optional<String[]> entrypoint = Optional.empty(); private boolean privileged = false; CreateContainerCommandImpl(DockerClient docker, DockerImage dockerImage, ContainerName containerName) { this.docker = docker; this.dockerImage = dockerImage; this.containerName = containerName; } @Override public Docker.CreateContainerCommand withHostName(String hostName) { this.hostName = Optional.of(hostName); return this; } @Override public Docker.CreateContainerCommand withResources(ContainerResources containerResources) { this.containerResources = Optional.of(containerResources); return this; } @Override public Docker.CreateContainerCommand withLabel(String name, String value) { assert !name.contains("="); labels.put(name, value); return this; } public Docker.CreateContainerCommand withManagedBy(String manager) { return withLabel(LABEL_NAME_MANAGEDBY, manager); } @Override public Docker.CreateContainerCommand withAddCapability(String capabilityName) { addCapabilities.add(Capability.valueOf(capabilityName)); return this; } @Override public Docker.CreateContainerCommand withDropCapability(String capabilityName) 
{ dropCapabilities.add(Capability.valueOf(capabilityName)); return this; } @Override public Docker.CreateContainerCommand withPrivileged(boolean privileged) { this.privileged = privileged; return this; } @Override public Docker.CreateContainerCommand withUlimit(String name, int softLimit, int hardLimit) { ulimits.add(new Ulimit(name, softLimit, hardLimit)); return this; } @Override public Docker.CreateContainerCommand withEntrypoint(String... entrypoint) { if (entrypoint.length < 1) throw new IllegalArgumentException("Entrypoint must contain at least 1 element"); this.entrypoint = Optional.of(entrypoint); return this; } @Override public Docker.CreateContainerCommand withEnvironment(String name, String value) { assert name.indexOf('=') == -1; environmentAssignments.add(name + "=" + value); return this; } @Override public Docker.CreateContainerCommand withVolume(Path path, Path volumePath) { volumeBindSpecs.add(path + ":" + volumePath + ":Z"); return this; } @Override public Docker.CreateContainerCommand withSharedVolume(Path path, Path volumePath) { volumeBindSpecs.add(path + ":" + volumePath + ":z"); return this; } @Override public Docker.CreateContainerCommand withNetworkMode(String mode) { networkMode = Optional.of(mode); return this; } @Override public Docker.CreateContainerCommand withIpAddress(InetAddress address) { if (address instanceof Inet6Address) { ipv6Address = Optional.of(address.getHostAddress()); } else { ipv4Address = Optional.of(address.getHostAddress()); } return this; } @Override public void create() { try { createCreateContainerCmd().exec(); } catch (RuntimeException e) { throw new DockerException("Failed to create container " + toString(), e); } } /** Maps ("--env", {"A", "B", "C"}) to "--env A --env B --env C" */ private static String toRepeatedOption(String option, List<String> optionValues) { return optionValues.stream() .map(optionValue -> option + " " + optionValue) .collect(Collectors.joining(" ")); } private static String 
toOptionalOption(String option, Optional<?> value) { return value.map(o -> option + " " + o).orElse(""); } private static String toFlagOption(String option, boolean value) { return value ? option : ""; } /** Make toString() print the equivalent arguments to 'docker run' */ @Override public String toString() { List<String> labelList = labels.entrySet().stream() .map(entry -> entry.getKey() + "=" + entry.getValue()).collect(Collectors.toList()); List<String> ulimitList = ulimits.stream() .map(ulimit -> ulimit.getName() + "=" + ulimit.getSoft() + ":" + ulimit.getHard()) .collect(Collectors.toList()); List<String> addCapabilitiesList = addCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList()); List<String> dropCapabilitiesList = dropCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList()); Optional<String> entrypointExecuteable = entrypoint.map(args -> args[0]); String entrypointArgs = entrypoint.map(Stream::of).orElseGet(Stream::empty) .skip(1) .collect(Collectors.joining(" ")); return Stream.of( "--name " + containerName.asString(), toOptionalOption("--hostname", hostName), toOptionalOption("--cpu-shares", containerResources.map(ContainerResources::cpuShares)), toOptionalOption("--cpus", containerResources.map(ContainerResources::cpus)), toOptionalOption("--memory", containerResources.map(ContainerResources::memoryBytes)), toRepeatedOption("--label", labelList), toRepeatedOption("--ulimit", ulimitList), toRepeatedOption("--env", environmentAssignments), toRepeatedOption("--volume", volumeBindSpecs), toRepeatedOption("--cap-add", addCapabilitiesList), toRepeatedOption("--cap-drop", dropCapabilitiesList), toOptionalOption("--net", networkMode), toOptionalOption("--ip", ipv4Address), toOptionalOption("--ip6", ipv6Address), toOptionalOption("--entrypoint", entrypointExecuteable), toFlagOption("--privileged", privileged), dockerImage.asString(), entrypointArgs) .filter(s -> !s.isEmpty()) 
.collect(Collectors.joining(" ")); } /** * Generates a pseudo-random MAC address based on the hostname, IPv4- and IPv6-address. */ static String generateMACAddress(Optional<String> hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) { final String seed = hostname.orElse("") + ipv4Address.orElse("") + ipv6Address.orElse(""); Random rand = getPRNG(seed); byte[] macAddr = new byte[6]; rand.nextBytes(macAddr); macAddr[0] = (byte) ((macAddr[0] | 2) & 254); return IntStream.range(0, macAddr.length) .mapToObj(i -> String.format("%02x", macAddr[i])) .collect(Collectors.joining(":")); } private static Random getPRNG(String seed) { try { SecureRandom rand = SecureRandom.getInstance("SHA1PRNG"); rand.setSeed(seed.getBytes()); return rand; } catch (NoSuchAlgorithmException e) { throw new RuntimeException("Failed to get pseudo-random number generator", e); } } }
class CreateContainerCommandImpl implements Docker.CreateContainerCommand { private final DockerClient docker; private final DockerImage dockerImage; private final ContainerName containerName; private final Map<String, String> labels = new HashMap<>(); private final List<String> environmentAssignments = new ArrayList<>(); private final List<String> volumeBindSpecs = new ArrayList<>(); private final List<Ulimit> ulimits = new ArrayList<>(); private final Set<Capability> addCapabilities = new HashSet<>(); private final Set<Capability> dropCapabilities = new HashSet<>(); private Optional<String> hostName = Optional.empty(); private Optional<ContainerResources> containerResources = Optional.empty(); private Optional<String> networkMode = Optional.empty(); private Optional<String> ipv4Address = Optional.empty(); private Optional<String> ipv6Address = Optional.empty(); private Optional<String[]> entrypoint = Optional.empty(); private boolean privileged = false; CreateContainerCommandImpl(DockerClient docker, DockerImage dockerImage, ContainerName containerName) { this.docker = docker; this.dockerImage = dockerImage; this.containerName = containerName; } @Override public Docker.CreateContainerCommand withHostName(String hostName) { this.hostName = Optional.of(hostName); return this; } @Override public Docker.CreateContainerCommand withResources(ContainerResources containerResources) { this.containerResources = Optional.of(containerResources); return this; } @Override public Docker.CreateContainerCommand withLabel(String name, String value) { assert !name.contains("="); labels.put(name, value); return this; } public Docker.CreateContainerCommand withManagedBy(String manager) { return withLabel(LABEL_NAME_MANAGEDBY, manager); } @Override public Docker.CreateContainerCommand withAddCapability(String capabilityName) { addCapabilities.add(Capability.valueOf(capabilityName)); return this; } @Override public Docker.CreateContainerCommand withDropCapability(String capabilityName) 
{ dropCapabilities.add(Capability.valueOf(capabilityName)); return this; } @Override public Docker.CreateContainerCommand withPrivileged(boolean privileged) { this.privileged = privileged; return this; } @Override public Docker.CreateContainerCommand withUlimit(String name, int softLimit, int hardLimit) { ulimits.add(new Ulimit(name, softLimit, hardLimit)); return this; } @Override public Docker.CreateContainerCommand withEntrypoint(String... entrypoint) { if (entrypoint.length < 1) throw new IllegalArgumentException("Entrypoint must contain at least 1 element"); this.entrypoint = Optional.of(entrypoint); return this; } @Override public Docker.CreateContainerCommand withEnvironment(String name, String value) { assert name.indexOf('=') == -1; environmentAssignments.add(name + "=" + value); return this; } @Override public Docker.CreateContainerCommand withVolume(Path path, Path volumePath) { volumeBindSpecs.add(path + ":" + volumePath + ":Z"); return this; } @Override public Docker.CreateContainerCommand withSharedVolume(Path path, Path volumePath) { volumeBindSpecs.add(path + ":" + volumePath + ":z"); return this; } @Override public Docker.CreateContainerCommand withNetworkMode(String mode) { networkMode = Optional.of(mode); return this; } @Override public Docker.CreateContainerCommand withIpAddress(InetAddress address) { if (address instanceof Inet6Address) { ipv6Address = Optional.of(address.getHostAddress()); } else { ipv4Address = Optional.of(address.getHostAddress()); } return this; } @Override public void create() { try { createCreateContainerCmd().exec(); } catch (RuntimeException e) { throw new DockerException("Failed to create container " + toString(), e); } } /** Maps ("--env", {"A", "B", "C"}) to "--env A --env B --env C" */ private static String toRepeatedOption(String option, List<String> optionValues) { return optionValues.stream() .map(optionValue -> option + " " + optionValue) .collect(Collectors.joining(" ")); } private static String 
toOptionalOption(String option, Optional<?> value) { return value.map(o -> option + " " + o).orElse(""); } private static String toFlagOption(String option, boolean value) { return value ? option : ""; } /** Make toString() print the equivalent arguments to 'docker run' */ @Override public String toString() { List<String> labelList = labels.entrySet().stream() .map(entry -> entry.getKey() + "=" + entry.getValue()).collect(Collectors.toList()); List<String> ulimitList = ulimits.stream() .map(ulimit -> ulimit.getName() + "=" + ulimit.getSoft() + ":" + ulimit.getHard()) .collect(Collectors.toList()); List<String> addCapabilitiesList = addCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList()); List<String> dropCapabilitiesList = dropCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList()); Optional<String> entrypointExecuteable = entrypoint.map(args -> args[0]); String entrypointArgs = entrypoint.map(Stream::of).orElseGet(Stream::empty) .skip(1) .collect(Collectors.joining(" ")); return Stream.of( "--name " + containerName.asString(), toOptionalOption("--hostname", hostName), toOptionalOption("--cpu-shares", containerResources.map(ContainerResources::cpuShares)), toOptionalOption("--cpus", containerResources.map(ContainerResources::cpus)), toOptionalOption("--memory", containerResources.map(ContainerResources::memoryBytes)), toRepeatedOption("--label", labelList), toRepeatedOption("--ulimit", ulimitList), toRepeatedOption("--env", environmentAssignments), toRepeatedOption("--volume", volumeBindSpecs), toRepeatedOption("--cap-add", addCapabilitiesList), toRepeatedOption("--cap-drop", dropCapabilitiesList), toOptionalOption("--net", networkMode), toOptionalOption("--ip", ipv4Address), toOptionalOption("--ip6", ipv6Address), toOptionalOption("--entrypoint", entrypointExecuteable), toFlagOption("--privileged", privileged), dockerImage.asString(), entrypointArgs) .filter(s -> !s.isEmpty()) 
.collect(Collectors.joining(" ")); } /** * Generates a pseudo-random MAC address based on the hostname, IPv4- and IPv6-address. */ static String generateMACAddress(Optional<String> hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) { final String seed = hostname.orElse("") + ipv4Address.orElse("") + ipv6Address.orElse(""); Random rand = getPRNG(seed); byte[] macAddr = new byte[6]; rand.nextBytes(macAddr); macAddr[0] = (byte) ((macAddr[0] | 2) & 254); return IntStream.range(0, macAddr.length) .mapToObj(i -> String.format("%02x", macAddr[i])) .collect(Collectors.joining(":")); } private static Random getPRNG(String seed) { try { SecureRandom rand = SecureRandom.getInstance("SHA1PRNG"); rand.setSeed(seed.getBytes()); return rand; } catch (NoSuchAlgorithmException e) { throw new RuntimeException("Failed to get pseudo-random number generator", e); } } }
I'll add in next PR
private CreateContainerCmd createCreateContainerCmd() { List<Bind> volumeBinds = volumeBindSpecs.stream().map(Bind::parse).collect(Collectors.toList()); final HostConfig hostConfig = new HostConfig(); containerResources.ifPresent(cr -> hostConfig .withCpuShares(cr.cpuShares()) .withMemory(cr.memoryBytes()) .withMemorySwap(cr.memoryBytes()) .withCpuPeriod(cr.cpuQuota() > 0 ? cr.cpuPeriod() : null) .withCpuQuota(cr.cpuQuota() > 0 ? cr.cpuQuota() : null)); final CreateContainerCmd containerCmd = docker .createContainerCmd(dockerImage.asString()) .withHostConfig(hostConfig) .withName(containerName.asString()) .withLabels(labels) .withEnv(environmentAssignments) .withBinds(volumeBinds) .withUlimits(ulimits) .withCapAdd(new ArrayList<>(addCapabilities)) .withCapDrop(new ArrayList<>(dropCapabilities)) .withPrivileged(privileged); networkMode .filter(mode -> ! mode.toLowerCase().equals("host")) .ifPresent(mode -> containerCmd.withMacAddress(generateMACAddress(hostName, ipv4Address, ipv6Address))); hostName.ifPresent(containerCmd::withHostName); networkMode.ifPresent(containerCmd::withNetworkMode); ipv4Address.ifPresent(containerCmd::withIpv4Address); ipv6Address.ifPresent(containerCmd::withIpv6Address); entrypoint.ifPresent(containerCmd::withEntrypoint); return containerCmd; }
.withMemorySwap(cr.memoryBytes())
private CreateContainerCmd createCreateContainerCmd() { List<Bind> volumeBinds = volumeBindSpecs.stream().map(Bind::parse).collect(Collectors.toList()); final HostConfig hostConfig = new HostConfig(); containerResources.ifPresent(cr -> hostConfig .withCpuShares(cr.cpuShares()) .withMemory(cr.memoryBytes()) .withMemorySwap(cr.memoryBytes()) .withCpuPeriod(cr.cpuQuota() > 0 ? cr.cpuPeriod() : null) .withCpuQuota(cr.cpuQuota() > 0 ? cr.cpuQuota() : null)); final CreateContainerCmd containerCmd = docker .createContainerCmd(dockerImage.asString()) .withHostConfig(hostConfig) .withName(containerName.asString()) .withLabels(labels) .withEnv(environmentAssignments) .withBinds(volumeBinds) .withUlimits(ulimits) .withCapAdd(new ArrayList<>(addCapabilities)) .withCapDrop(new ArrayList<>(dropCapabilities)) .withPrivileged(privileged); networkMode .filter(mode -> ! mode.toLowerCase().equals("host")) .ifPresent(mode -> containerCmd.withMacAddress(generateMACAddress(hostName, ipv4Address, ipv6Address))); hostName.ifPresent(containerCmd::withHostName); networkMode.ifPresent(containerCmd::withNetworkMode); ipv4Address.ifPresent(containerCmd::withIpv4Address); ipv6Address.ifPresent(containerCmd::withIpv6Address); entrypoint.ifPresent(containerCmd::withEntrypoint); return containerCmd; }
class CreateContainerCommandImpl implements Docker.CreateContainerCommand { private final DockerClient docker; private final DockerImage dockerImage; private final ContainerName containerName; private final Map<String, String> labels = new HashMap<>(); private final List<String> environmentAssignments = new ArrayList<>(); private final List<String> volumeBindSpecs = new ArrayList<>(); private final List<Ulimit> ulimits = new ArrayList<>(); private final Set<Capability> addCapabilities = new HashSet<>(); private final Set<Capability> dropCapabilities = new HashSet<>(); private Optional<String> hostName = Optional.empty(); private Optional<ContainerResources> containerResources = Optional.empty(); private Optional<String> networkMode = Optional.empty(); private Optional<String> ipv4Address = Optional.empty(); private Optional<String> ipv6Address = Optional.empty(); private Optional<String[]> entrypoint = Optional.empty(); private boolean privileged = false; CreateContainerCommandImpl(DockerClient docker, DockerImage dockerImage, ContainerName containerName) { this.docker = docker; this.dockerImage = dockerImage; this.containerName = containerName; } @Override public Docker.CreateContainerCommand withHostName(String hostName) { this.hostName = Optional.of(hostName); return this; } @Override public Docker.CreateContainerCommand withResources(ContainerResources containerResources) { this.containerResources = Optional.of(containerResources); return this; } @Override public Docker.CreateContainerCommand withLabel(String name, String value) { assert !name.contains("="); labels.put(name, value); return this; } public Docker.CreateContainerCommand withManagedBy(String manager) { return withLabel(LABEL_NAME_MANAGEDBY, manager); } @Override public Docker.CreateContainerCommand withAddCapability(String capabilityName) { addCapabilities.add(Capability.valueOf(capabilityName)); return this; } @Override public Docker.CreateContainerCommand withDropCapability(String capabilityName) 
{ dropCapabilities.add(Capability.valueOf(capabilityName)); return this; } @Override public Docker.CreateContainerCommand withPrivileged(boolean privileged) { this.privileged = privileged; return this; } @Override public Docker.CreateContainerCommand withUlimit(String name, int softLimit, int hardLimit) { ulimits.add(new Ulimit(name, softLimit, hardLimit)); return this; } @Override public Docker.CreateContainerCommand withEntrypoint(String... entrypoint) { if (entrypoint.length < 1) throw new IllegalArgumentException("Entrypoint must contain at least 1 element"); this.entrypoint = Optional.of(entrypoint); return this; } @Override public Docker.CreateContainerCommand withEnvironment(String name, String value) { assert name.indexOf('=') == -1; environmentAssignments.add(name + "=" + value); return this; } @Override public Docker.CreateContainerCommand withVolume(Path path, Path volumePath) { volumeBindSpecs.add(path + ":" + volumePath + ":Z"); return this; } @Override public Docker.CreateContainerCommand withSharedVolume(Path path, Path volumePath) { volumeBindSpecs.add(path + ":" + volumePath + ":z"); return this; } @Override public Docker.CreateContainerCommand withNetworkMode(String mode) { networkMode = Optional.of(mode); return this; } @Override public Docker.CreateContainerCommand withIpAddress(InetAddress address) { if (address instanceof Inet6Address) { ipv6Address = Optional.of(address.getHostAddress()); } else { ipv4Address = Optional.of(address.getHostAddress()); } return this; } @Override public void create() { try { createCreateContainerCmd().exec(); } catch (RuntimeException e) { throw new DockerException("Failed to create container " + toString(), e); } } /** Maps ("--env", {"A", "B", "C"}) to "--env A --env B --env C" */ private static String toRepeatedOption(String option, List<String> optionValues) { return optionValues.stream() .map(optionValue -> option + " " + optionValue) .collect(Collectors.joining(" ")); } private static String 
toOptionalOption(String option, Optional<?> value) { return value.map(o -> option + " " + o).orElse(""); } private static String toFlagOption(String option, boolean value) { return value ? option : ""; } /** Make toString() print the equivalent arguments to 'docker run' */ @Override public String toString() { List<String> labelList = labels.entrySet().stream() .map(entry -> entry.getKey() + "=" + entry.getValue()).collect(Collectors.toList()); List<String> ulimitList = ulimits.stream() .map(ulimit -> ulimit.getName() + "=" + ulimit.getSoft() + ":" + ulimit.getHard()) .collect(Collectors.toList()); List<String> addCapabilitiesList = addCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList()); List<String> dropCapabilitiesList = dropCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList()); Optional<String> entrypointExecuteable = entrypoint.map(args -> args[0]); String entrypointArgs = entrypoint.map(Stream::of).orElseGet(Stream::empty) .skip(1) .collect(Collectors.joining(" ")); return Stream.of( "--name " + containerName.asString(), toOptionalOption("--hostname", hostName), toOptionalOption("--cpu-shares", containerResources.map(ContainerResources::cpuShares)), toOptionalOption("--cpus", containerResources.map(ContainerResources::cpus)), toOptionalOption("--memory", containerResources.map(ContainerResources::memoryBytes)), toRepeatedOption("--label", labelList), toRepeatedOption("--ulimit", ulimitList), toRepeatedOption("--env", environmentAssignments), toRepeatedOption("--volume", volumeBindSpecs), toRepeatedOption("--cap-add", addCapabilitiesList), toRepeatedOption("--cap-drop", dropCapabilitiesList), toOptionalOption("--net", networkMode), toOptionalOption("--ip", ipv4Address), toOptionalOption("--ip6", ipv6Address), toOptionalOption("--entrypoint", entrypointExecuteable), toFlagOption("--privileged", privileged), dockerImage.asString(), entrypointArgs) .filter(s -> !s.isEmpty()) 
.collect(Collectors.joining(" ")); } /** * Generates a pseudo-random MAC address based on the hostname, IPv4- and IPv6-address. */ static String generateMACAddress(Optional<String> hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) { final String seed = hostname.orElse("") + ipv4Address.orElse("") + ipv6Address.orElse(""); Random rand = getPRNG(seed); byte[] macAddr = new byte[6]; rand.nextBytes(macAddr); macAddr[0] = (byte) ((macAddr[0] | 2) & 254); return IntStream.range(0, macAddr.length) .mapToObj(i -> String.format("%02x", macAddr[i])) .collect(Collectors.joining(":")); } private static Random getPRNG(String seed) { try { SecureRandom rand = SecureRandom.getInstance("SHA1PRNG"); rand.setSeed(seed.getBytes()); return rand; } catch (NoSuchAlgorithmException e) { throw new RuntimeException("Failed to get pseudo-random number generator", e); } } }
class CreateContainerCommandImpl implements Docker.CreateContainerCommand { private final DockerClient docker; private final DockerImage dockerImage; private final ContainerName containerName; private final Map<String, String> labels = new HashMap<>(); private final List<String> environmentAssignments = new ArrayList<>(); private final List<String> volumeBindSpecs = new ArrayList<>(); private final List<Ulimit> ulimits = new ArrayList<>(); private final Set<Capability> addCapabilities = new HashSet<>(); private final Set<Capability> dropCapabilities = new HashSet<>(); private Optional<String> hostName = Optional.empty(); private Optional<ContainerResources> containerResources = Optional.empty(); private Optional<String> networkMode = Optional.empty(); private Optional<String> ipv4Address = Optional.empty(); private Optional<String> ipv6Address = Optional.empty(); private Optional<String[]> entrypoint = Optional.empty(); private boolean privileged = false; CreateContainerCommandImpl(DockerClient docker, DockerImage dockerImage, ContainerName containerName) { this.docker = docker; this.dockerImage = dockerImage; this.containerName = containerName; } @Override public Docker.CreateContainerCommand withHostName(String hostName) { this.hostName = Optional.of(hostName); return this; } @Override public Docker.CreateContainerCommand withResources(ContainerResources containerResources) { this.containerResources = Optional.of(containerResources); return this; } @Override public Docker.CreateContainerCommand withLabel(String name, String value) { assert !name.contains("="); labels.put(name, value); return this; } public Docker.CreateContainerCommand withManagedBy(String manager) { return withLabel(LABEL_NAME_MANAGEDBY, manager); } @Override public Docker.CreateContainerCommand withAddCapability(String capabilityName) { addCapabilities.add(Capability.valueOf(capabilityName)); return this; } @Override public Docker.CreateContainerCommand withDropCapability(String capabilityName) 
{ dropCapabilities.add(Capability.valueOf(capabilityName)); return this; } @Override public Docker.CreateContainerCommand withPrivileged(boolean privileged) { this.privileged = privileged; return this; } @Override public Docker.CreateContainerCommand withUlimit(String name, int softLimit, int hardLimit) { ulimits.add(new Ulimit(name, softLimit, hardLimit)); return this; } @Override public Docker.CreateContainerCommand withEntrypoint(String... entrypoint) { if (entrypoint.length < 1) throw new IllegalArgumentException("Entrypoint must contain at least 1 element"); this.entrypoint = Optional.of(entrypoint); return this; } @Override public Docker.CreateContainerCommand withEnvironment(String name, String value) { assert name.indexOf('=') == -1; environmentAssignments.add(name + "=" + value); return this; } @Override public Docker.CreateContainerCommand withVolume(Path path, Path volumePath) { volumeBindSpecs.add(path + ":" + volumePath + ":Z"); return this; } @Override public Docker.CreateContainerCommand withSharedVolume(Path path, Path volumePath) { volumeBindSpecs.add(path + ":" + volumePath + ":z"); return this; } @Override public Docker.CreateContainerCommand withNetworkMode(String mode) { networkMode = Optional.of(mode); return this; } @Override public Docker.CreateContainerCommand withIpAddress(InetAddress address) { if (address instanceof Inet6Address) { ipv6Address = Optional.of(address.getHostAddress()); } else { ipv4Address = Optional.of(address.getHostAddress()); } return this; } @Override public void create() { try { createCreateContainerCmd().exec(); } catch (RuntimeException e) { throw new DockerException("Failed to create container " + toString(), e); } } /** Maps ("--env", {"A", "B", "C"}) to "--env A --env B --env C" */ private static String toRepeatedOption(String option, List<String> optionValues) { return optionValues.stream() .map(optionValue -> option + " " + optionValue) .collect(Collectors.joining(" ")); } private static String 
toOptionalOption(String option, Optional<?> value) { return value.map(o -> option + " " + o).orElse(""); } private static String toFlagOption(String option, boolean value) { return value ? option : ""; } /** Make toString() print the equivalent arguments to 'docker run' */ @Override public String toString() { List<String> labelList = labels.entrySet().stream() .map(entry -> entry.getKey() + "=" + entry.getValue()).collect(Collectors.toList()); List<String> ulimitList = ulimits.stream() .map(ulimit -> ulimit.getName() + "=" + ulimit.getSoft() + ":" + ulimit.getHard()) .collect(Collectors.toList()); List<String> addCapabilitiesList = addCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList()); List<String> dropCapabilitiesList = dropCapabilities.stream().map(Enum<Capability>::toString).sorted().collect(Collectors.toList()); Optional<String> entrypointExecuteable = entrypoint.map(args -> args[0]); String entrypointArgs = entrypoint.map(Stream::of).orElseGet(Stream::empty) .skip(1) .collect(Collectors.joining(" ")); return Stream.of( "--name " + containerName.asString(), toOptionalOption("--hostname", hostName), toOptionalOption("--cpu-shares", containerResources.map(ContainerResources::cpuShares)), toOptionalOption("--cpus", containerResources.map(ContainerResources::cpus)), toOptionalOption("--memory", containerResources.map(ContainerResources::memoryBytes)), toRepeatedOption("--label", labelList), toRepeatedOption("--ulimit", ulimitList), toRepeatedOption("--env", environmentAssignments), toRepeatedOption("--volume", volumeBindSpecs), toRepeatedOption("--cap-add", addCapabilitiesList), toRepeatedOption("--cap-drop", dropCapabilitiesList), toOptionalOption("--net", networkMode), toOptionalOption("--ip", ipv4Address), toOptionalOption("--ip6", ipv6Address), toOptionalOption("--entrypoint", entrypointExecuteable), toFlagOption("--privileged", privileged), dockerImage.asString(), entrypointArgs) .filter(s -> !s.isEmpty()) 
.collect(Collectors.joining(" ")); } /** * Generates a pseudo-random MAC address based on the hostname, IPv4- and IPv6-address. */ static String generateMACAddress(Optional<String> hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) { final String seed = hostname.orElse("") + ipv4Address.orElse("") + ipv6Address.orElse(""); Random rand = getPRNG(seed); byte[] macAddr = new byte[6]; rand.nextBytes(macAddr); macAddr[0] = (byte) ((macAddr[0] | 2) & 254); return IntStream.range(0, macAddr.length) .mapToObj(i -> String.format("%02x", macAddr[i])) .collect(Collectors.joining(":")); } private static Random getPRNG(String seed) { try { SecureRandom rand = SecureRandom.getInstance("SHA1PRNG"); rand.setSeed(seed.getBytes()); return rand; } catch (NoSuchAlgorithmException e) { throw new RuntimeException("Failed to get pseudo-random number generator", e); } } }
'search' is redundant, so remove it.
public void honours_environment_vars() { Element clusterElem = DomBuilderTest.parse( "<jdisc version='1.0'>", " <search/>", " <nodes>", " <environment-variables>", " <KMP_SETTING>1</KMP_SETTING>", " <KMP_AFFINITY>granularity=fine,verbose,compact,1,0</KMP_AFFINITY>", " </environment-variables>", " <node hostalias='mockhost'/>", " </nodes>", "</jdisc>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); root.getConfig(qrStartBuilder, "jdisc/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("KMP_SETTING=1 KMP_AFFINITY=granularity=fine,verbose,compact,1,0 ", qrStartConfig.qrs().env()); }
" <search/>",
public void honours_environment_vars() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <nodes>", " <environment-variables>", " <KMP_SETTING>1</KMP_SETTING>", " <KMP_AFFINITY>granularity=fine,verbose,compact,1,0</KMP_AFFINITY>", " </environment-variables>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("KMP_SETTING=1 KMP_AFFINITY=granularity=fine,verbose,compact,1,0 ", qrStartConfig.qrs().env()); }
class MyLogger implements DeployLogger { List<Pair<Level, String>> msgs = new ArrayList<>(); @Override public void log(Level level, String message) { msgs.add(new Pair<>(level, message)); } }
class MyLogger implements DeployLogger { List<Pair<Level, String>> msgs = new ArrayList<>(); @Override public void log(Level level, String message) { msgs.add(new Pair<>(level, message)); } }
'jdisc' is soon to be deprecated, so use 'container' instead. (I know a lot of tests still use 'jdisc', but it will be fixed.) Verify that the test still works - you probably need to update the configId in the `getConfig` call below.
public void honours_environment_vars() { Element clusterElem = DomBuilderTest.parse( "<jdisc version='1.0'>", " <search/>", " <nodes>", " <environment-variables>", " <KMP_SETTING>1</KMP_SETTING>", " <KMP_AFFINITY>granularity=fine,verbose,compact,1,0</KMP_AFFINITY>", " </environment-variables>", " <node hostalias='mockhost'/>", " </nodes>", "</jdisc>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); root.getConfig(qrStartBuilder, "jdisc/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("KMP_SETTING=1 KMP_AFFINITY=granularity=fine,verbose,compact,1,0 ", qrStartConfig.qrs().env()); }
"<jdisc version='1.0'>",
public void honours_environment_vars() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <nodes>", " <environment-variables>", " <KMP_SETTING>1</KMP_SETTING>", " <KMP_AFFINITY>granularity=fine,verbose,compact,1,0</KMP_AFFINITY>", " </environment-variables>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("KMP_SETTING=1 KMP_AFFINITY=granularity=fine,verbose,compact,1,0 ", qrStartConfig.qrs().env()); }
class MyLogger implements DeployLogger { List<Pair<Level, String>> msgs = new ArrayList<>(); @Override public void log(Level level, String message) { msgs.add(new Pair<>(level, message)); } }
class MyLogger implements DeployLogger { List<Pair<Level, String>> msgs = new ArrayList<>(); @Override public void log(Level level, String message) { msgs.add(new Pair<>(level, message)); } }
Fixed
public void honours_environment_vars() { Element clusterElem = DomBuilderTest.parse( "<jdisc version='1.0'>", " <search/>", " <nodes>", " <environment-variables>", " <KMP_SETTING>1</KMP_SETTING>", " <KMP_AFFINITY>granularity=fine,verbose,compact,1,0</KMP_AFFINITY>", " </environment-variables>", " <node hostalias='mockhost'/>", " </nodes>", "</jdisc>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); root.getConfig(qrStartBuilder, "jdisc/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("KMP_SETTING=1 KMP_AFFINITY=granularity=fine,verbose,compact,1,0 ", qrStartConfig.qrs().env()); }
"<jdisc version='1.0'>",
public void honours_environment_vars() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <nodes>", " <environment-variables>", " <KMP_SETTING>1</KMP_SETTING>", " <KMP_AFFINITY>granularity=fine,verbose,compact,1,0</KMP_AFFINITY>", " </environment-variables>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("KMP_SETTING=1 KMP_AFFINITY=granularity=fine,verbose,compact,1,0 ", qrStartConfig.qrs().env()); }
class MyLogger implements DeployLogger { List<Pair<Level, String>> msgs = new ArrayList<>(); @Override public void log(Level level, String message) { msgs.add(new Pair<>(level, message)); } }
class MyLogger implements DeployLogger { List<Pair<Level, String>> msgs = new ArrayList<>(); @Override public void log(Level level, String message) { msgs.add(new Pair<>(level, message)); } }
Fixed
public void honours_environment_vars() { Element clusterElem = DomBuilderTest.parse( "<jdisc version='1.0'>", " <search/>", " <nodes>", " <environment-variables>", " <KMP_SETTING>1</KMP_SETTING>", " <KMP_AFFINITY>granularity=fine,verbose,compact,1,0</KMP_AFFINITY>", " </environment-variables>", " <node hostalias='mockhost'/>", " </nodes>", "</jdisc>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); root.getConfig(qrStartBuilder, "jdisc/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("KMP_SETTING=1 KMP_AFFINITY=granularity=fine,verbose,compact,1,0 ", qrStartConfig.qrs().env()); }
" <search/>",
public void honours_environment_vars() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <nodes>", " <environment-variables>", " <KMP_SETTING>1</KMP_SETTING>", " <KMP_AFFINITY>granularity=fine,verbose,compact,1,0</KMP_AFFINITY>", " </environment-variables>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("KMP_SETTING=1 KMP_AFFINITY=granularity=fine,verbose,compact,1,0 ", qrStartConfig.qrs().env()); }
class MyLogger implements DeployLogger { List<Pair<Level, String>> msgs = new ArrayList<>(); @Override public void log(Level level, String message) { msgs.add(new Pair<>(level, message)); } }
class MyLogger implements DeployLogger { List<Pair<Level, String>> msgs = new ArrayList<>(); @Override public void log(Level level, String message) { msgs.add(new Pair<>(level, message)); } }
use snapshot instead?
public void setUp() throws ListenFailedException { metrics = TransportMetrics.getInstance(); metrics.reset(); server = new Supervisor(new Transport(crypto)); client = new Supervisor(new Transport(crypto)); acceptor = server.listen(new Spec(0)); target = client.connect(new Spec("localhost", acceptor.port())); server.addMethod(new Method("echo", "*", "*", this, "rpc_echo")); refValues = new Values(); byte[] dataValue = { 1, 2, 3, 4 }; byte[] int8Array = { 1, 2, 3, 4 }; short[] int16Array = { 2, 4, 6, 8 }; int[] int32Array = { 4, 8, 12, 16 }; long[] int64Array = { 8, 16, 24, 32 }; float[] floatArray = { 1.5f, 2.0f, 2.5f, 3.0f }; double[] doubleArray = { 1.25, 1.50, 1.75, 2.00 }; byte[][] dataArray = {{ 1, 0, 1, 0 }, { 0, 2, 0, 2 }, { 3, 0, 3, 0 }, { 0, 4, 0, 4 }}; String[] stringArray = { "one", "two", "three", "four" }; refValues.add(new Int8Value((byte)1)); refValues.add(new Int8Array(int8Array)); refValues.add(new Int16Value((short)2)); refValues.add(new Int16Array(int16Array)); refValues.add(new Int32Value(4)); refValues.add(new Int32Array(int32Array)); refValues.add(new Int64Value(8)); refValues.add(new Int64Array(int64Array)); refValues.add(new FloatValue(2.5f)); refValues.add(new FloatArray(floatArray)); refValues.add(new DoubleValue(3.75)); refValues.add(new DoubleArray(doubleArray)); refValues.add(new DataValue(dataValue)); refValues.add(new DataArray(dataArray)); refValues.add(new StringValue("test")); refValues.add(new StringArray(stringArray)); }
metrics.reset();
public void setUp() throws ListenFailedException { metrics = TransportMetrics.getInstance(); startSnapshot = metrics.snapshot(); server = new Supervisor(new Transport(crypto)); client = new Supervisor(new Transport(crypto)); acceptor = server.listen(new Spec(0)); target = client.connect(new Spec("localhost", acceptor.port())); server.addMethod(new Method("echo", "*", "*", this, "rpc_echo")); refValues = new Values(); byte[] dataValue = { 1, 2, 3, 4 }; byte[] int8Array = { 1, 2, 3, 4 }; short[] int16Array = { 2, 4, 6, 8 }; int[] int32Array = { 4, 8, 12, 16 }; long[] int64Array = { 8, 16, 24, 32 }; float[] floatArray = { 1.5f, 2.0f, 2.5f, 3.0f }; double[] doubleArray = { 1.25, 1.50, 1.75, 2.00 }; byte[][] dataArray = {{ 1, 0, 1, 0 }, { 0, 2, 0, 2 }, { 3, 0, 3, 0 }, { 0, 4, 0, 4 }}; String[] stringArray = { "one", "two", "three", "four" }; refValues.add(new Int8Value((byte)1)); refValues.add(new Int8Array(int8Array)); refValues.add(new Int16Value((short)2)); refValues.add(new Int16Array(int16Array)); refValues.add(new Int32Value(4)); refValues.add(new Int32Array(int32Array)); refValues.add(new Int64Value(8)); refValues.add(new Int64Array(int64Array)); refValues.add(new FloatValue(2.5f)); refValues.add(new FloatArray(floatArray)); refValues.add(new DoubleValue(3.75)); refValues.add(new DoubleArray(doubleArray)); refValues.add(new DataValue(dataValue)); refValues.add(new DataArray(dataArray)); refValues.add(new StringValue("test")); refValues.add(new StringArray(stringArray)); }
class EchoTest { TransportMetrics metrics; Supervisor server; Acceptor acceptor; Supervisor client; Target target; Values refValues; private interface MetricsAssertions { void assertMetrics(TransportMetrics metrics) throws AssertionError; } @Parameter(value = 0) public CryptoEngine crypto; @Parameter(value = 1) public MetricsAssertions metricsAssertions; @Parameters(name = "{0}") public static Object[] engines() { return new Object[][] { { new NullCryptoEngine(), (MetricsAssertions) metrics -> { assertEquals(1, metrics.serverUnencryptedConnectionsEstablished()); assertEquals(1, metrics.clientUnencryptedConnectionsEstablished()); }}, {new XorCryptoEngine(), null}, { new TlsCryptoEngine(createTestTlsContext()), (MetricsAssertions) metrics -> { assertEquals(1, metrics.serverTlsConnectionsEstablished()); assertEquals(1, metrics.clientTlsConnectionsEstablished()); }}, { new MaybeTlsCryptoEngine(new TlsCryptoEngine(createTestTlsContext()), false), (MetricsAssertions) metrics -> { assertEquals(1, metrics.serverUnencryptedConnectionsEstablished()); assertEquals(1, metrics.clientUnencryptedConnectionsEstablished()); }}, { new MaybeTlsCryptoEngine(new TlsCryptoEngine(createTestTlsContext()), true), (MetricsAssertions) metrics -> { assertEquals(1, metrics.serverTlsConnectionsEstablished()); assertEquals(1, metrics.clientTlsConnectionsEstablished()); }}}; } @Before @After public void tearDown() { target.close(); acceptor.shutdown().join(); client.transport().shutdown().join(); server.transport().shutdown().join(); } public void rpc_echo(Request req) { if (!Test.equals(req.parameters(), refValues)) { System.err.println("Parameters does not match reference values"); req.setError(ErrorCode.METHOD_FAILED, "parameter mismatch"); return; } Values p = req.parameters(); Values r = req.returnValues(); for (int i = 0; i < p.size(); i++) { r.add(p.get(i)); } } @org.junit.Test public void testEcho() { Request req = new Request("echo"); Values p = req.parameters(); for (int i = 0; i < 
refValues.size(); i++) { p.add(refValues.get(i)); } target.invokeSync(req, 60.0); assertTrue(req.checkReturnTypes("bBhHiIlLfFdDxXsS")); assertTrue(Test.equals(req.returnValues(), req.parameters())); assertTrue(Test.equals(req.returnValues(), refValues)); assertTrue(Test.equals(req.parameters(), refValues)); if (metricsAssertions != null) { metricsAssertions.assertMetrics(metrics); } } }
class EchoTest { TransportMetrics metrics; TransportMetrics.Snapshot startSnapshot; Supervisor server; Acceptor acceptor; Supervisor client; Target target; Values refValues; private interface MetricsAssertions { void assertMetrics(TransportMetrics.Snapshot snapshot) throws AssertionError; } @Parameter(value = 0) public CryptoEngine crypto; @Parameter(value = 1) public MetricsAssertions metricsAssertions; @Parameters(name = "{0}") public static Object[] engines() { return new Object[][] { { new NullCryptoEngine(), (MetricsAssertions) metrics -> { assertEquals(1, metrics.serverUnencryptedConnectionsEstablished()); assertEquals(1, metrics.clientUnencryptedConnectionsEstablished()); }}, {new XorCryptoEngine(), null}, { new TlsCryptoEngine(createTestTlsContext()), (MetricsAssertions) metrics -> { assertEquals(1, metrics.serverTlsConnectionsEstablished()); assertEquals(1, metrics.clientTlsConnectionsEstablished()); }}, { new MaybeTlsCryptoEngine(new TlsCryptoEngine(createTestTlsContext()), false), (MetricsAssertions) metrics -> { assertEquals(1, metrics.serverUnencryptedConnectionsEstablished()); assertEquals(1, metrics.clientUnencryptedConnectionsEstablished()); }}, { new MaybeTlsCryptoEngine(new TlsCryptoEngine(createTestTlsContext()), true), (MetricsAssertions) metrics -> { assertEquals(1, metrics.serverTlsConnectionsEstablished()); assertEquals(1, metrics.clientTlsConnectionsEstablished()); }}}; } @Before @After public void tearDown() { target.close(); acceptor.shutdown().join(); client.transport().shutdown().join(); server.transport().shutdown().join(); } public void rpc_echo(Request req) { if (!Test.equals(req.parameters(), refValues)) { System.err.println("Parameters does not match reference values"); req.setError(ErrorCode.METHOD_FAILED, "parameter mismatch"); return; } Values p = req.parameters(); Values r = req.returnValues(); for (int i = 0; i < p.size(); i++) { r.add(p.get(i)); } } @org.junit.Test public void testEcho() { Request req = new Request("echo"); 
Values p = req.parameters(); for (int i = 0; i < refValues.size(); i++) { p.add(refValues.get(i)); } target.invokeSync(req, 60.0); assertTrue(req.checkReturnTypes("bBhHiIlLfFdDxXsS")); assertTrue(Test.equals(req.returnValues(), req.parameters())); assertTrue(Test.equals(req.returnValues(), refValues)); assertTrue(Test.equals(req.parameters(), refValues)); if (metricsAssertions != null) { metricsAssertions.assertMetrics(metrics.snapshot().changesSince(startSnapshot)); } } }
`.applicationVersion()` never returns `null`. What you want to check for is whether it's `ApplicationVersion.unknown`.
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = ZoneId.from(environment, region); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip")) .map(ApplicationPackage::new); Inspector sourceRevision = deployOptions.field("sourceRevision"); Inspector buildNumber = deployOptions.field("buildNumber"); if (sourceRevision.valid() != buildNumber.valid()) throw new IllegalArgumentException("Source revision and build number must both be provided, or not"); Optional<ApplicationVersion> applicationVersion = Optional.empty(); if (sourceRevision.valid()) { if (applicationPackage.isPresent()) throw new IllegalArgumentException("Application version and application package can't both be provided."); applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision), buildNumber.asLong())); applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get())); } boolean deployDirectly = deployOptions.field("deployDirectly").asBool(); Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new); if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) { Optional<Deployment> deployment = controller.applications().get(applicationId) .map(Application::deployments) .flatMap(deployments -> Optional.ofNullable(deployments.get(zone))); if(!deployment.isPresent()) 
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist"); applicationVersion = Optional.ofNullable(deployment.get().applicationVersion()); if(!applicationVersion.isPresent()) throw new IllegalArgumentException("Can't redeploy application, application version is unknown"); vespaVersion = Optional.ofNullable(deployment.get().version()); applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get())); } DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly, vespaVersion, deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deploy(applicationId, zone, applicationPackage, applicationVersion, deployOptionsJsonClass, Optional.of(getUserPrincipal(request).getIdentity())); return new SlimeJsonResponse(toSlime(result)); }
applicationVersion = Optional.ofNullable(deployment.get().applicationVersion());
private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = ZoneId.from(environment, region); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip")) .map(ApplicationPackage::new); Inspector sourceRevision = deployOptions.field("sourceRevision"); Inspector buildNumber = deployOptions.field("buildNumber"); if (sourceRevision.valid() != buildNumber.valid()) throw new IllegalArgumentException("Source revision and build number must both be provided, or not"); Optional<ApplicationVersion> applicationVersion = Optional.empty(); if (sourceRevision.valid()) { if (applicationPackage.isPresent()) throw new IllegalArgumentException("Application version and application package can't both be provided."); applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision), buildNumber.asLong())); applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get())); } boolean deployDirectly = deployOptions.field("deployDirectly").asBool(); Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new); if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) { Optional<Deployment> deployment = controller.applications().get(applicationId) .map(Application::deployments) .flatMap(deployments -> Optional.ofNullable(deployments.get(zone))); if(!deployment.isPresent()) 
throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist"); ApplicationVersion version = deployment.get().applicationVersion(); if(version.isUnknown()) throw new IllegalArgumentException("Can't redeploy application, application version is unknown"); applicationVersion = Optional.of(version); vespaVersion = Optional.of(deployment.get().version()); applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get())); } DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly, vespaVersion, deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deploy(applicationId, zone, applicationPackage, applicationVersion, deployOptionsJsonClass, Optional.of(getUserPrincipal(request).getIdentity())); return new SlimeJsonResponse(toSlime(result)); }
class ApplicationApiHandler extends LoggingRequestHandler { private final Controller controller; private final ZmsClientFacade zmsClient; @Inject public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, AthenzClientFactory athenzClientFactory) { super(parentCtx); this.controller = controller; this.zmsClient = new ZmsClientFacade(athenzClientFactory.createZmsClient(), athenzClientFactory.getControllerIdentity()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return handlePOST(request); case DELETE: return handleDELETE(request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return ErrorResponse.from(e); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines(); if 
(path.matches("/application/v4/athensDomain")) return athenzDomains(request); if (path.matches("/application/v4/property")) return properties(); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.getUri().getQuery()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/badge")) return badge(path.get("tenant"), path.get("application"), path.get("instance")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/badge/{job}")) return badge(path.get("tenant"), path.get("application"), path.get("instance"), path.get("job"), request.getProperty("historyLength")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if 
(path.matches("/application/v4/user")) return createUser(request); if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), readToString(request.getData()), false); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), readToString(request.getData()), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyJsonResponse response = new EmptyJsonResponse(); response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) toSlime(tenantArray.addObject(), tenant, request, true); return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return recurseOverTenants(request) 
? recursiveRoot(request) : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property"); } private HttpResponse authenticatedUser(HttpRequest request) { String userIdString = request.getProperty("userOverride"); if (userIdString == null) userIdString = getUserId(request) .map(UserId::id) .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride")); UserId userId = new UserId(userIdString); List<Tenant> tenants = controller.tenants().asList(userId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setString("user", userId.id()); Cursor tenantsArray = response.setArray("tenants"); for (Tenant tenant : tenants) tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()); response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant instanceof UserTenant && ((UserTenant) tenant).is(userId.id()))); return new SlimeJsonResponse(slime); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } /** Lists the screwdriver project id for each application */ private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! 
application.deploymentJobs().projectId().isPresent()) continue;

            Cursor pipelineObject = pipelinesArray.addObject();
            pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong()));
            pipelineObject.setString("tenant", application.id().tenant().value());
            pipelineObject.setString("application", application.id().application().value());
            pipelineObject.setString("instance", application.id().instance().value());
        }
        // Always empty; presumably kept for response-schema compatibility — TODO confirm with consumers.
        response.setArray("brokenTenantPipelines");
        return new SlimeJsonResponse(slime);
    }

    /** Lists the Athenz domains matching the given (optional) 'prefix' request property. */
    private HttpResponse athenzDomains(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("data");
        for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) {
            array.addString(athenzDomain.getName());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Lists all known properties, by id and name. */
    private HttpResponse properties() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("properties");
        for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
            Cursor propertyObject = array.addObject();
            propertyObject.setString("propertyid", entry.getKey().id());
            propertyObject.setString("property", entry.getValue().id());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the named tenant, with its applications, or 404 when it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().tenant(TenantName.from(tenantName))
                         .map(tenant -> tenant(tenant, request, true))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Renders the given tenant, optionally listing its applications. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request, listApplications);
        return new SlimeJsonResponse(slime);
    }

    /** Lists all applications of the given tenant. */
    private HttpResponse applications(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array = slime.setArray();
        for (Application application : controller.applications().asList(tenant))
            toSlime(application, array.addObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the 'default' instance of the given application, or 404 when it does not exist. */
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
        Application application = controller.applications().get(applicationId)
                                            .orElseThrow(() -> new NotExistsException(applicationId + " not found"));

        Slime slime = new Slime();
        toSlime(slime.setObject(), application, request);
        return new SlimeJsonResponse(slime);
    }

    /** Fetches logs for the given deployment from the config server, filtered by the given query string. */
    private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, String query) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = ZoneId.from(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        HashMap<String, String> queryParameters = getParameters(query);
        Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters);
        Slime slime = new Slime();
        Cursor object = slime.setObject();
        if (response.isPresent()) {
            // Each log entry becomes one string field in the response object.
            response.get().logs().entrySet().stream().forEach(entry -> object.setString(entry.getKey(), entry.getValue()));
        }
        return new SlimeJsonResponse(slime);
    }

    /** Force-triggers the given job for the given application, attributed to the authenticated user. */
    private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
        String triggered = controller.applications().deploymentTrigger()
                                     .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName())
                                     .stream().map(JobType::jobName).collect(joining(", "));
        return new MessageResponse(triggered.isEmpty() ?
"Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id); } private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); } private HashMap<String, String> getParameters(String query) { HashMap<String, String> keyValPair = new HashMap<>(); Arrays.stream(query.split("&")).forEach(pair -> { String[] splitPair = pair.split("="); keyValPair.put(splitPair[0], splitPair[1]); }); return keyValPair; } private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + application.id().instance().value() + "/job/", request.getUri()).toString()); application.deploymentJobs().statusOf(JobType.component) .flatMap(status -> status.lastSuccess()) .map(run -> run.application().source()) .ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source"))); application.deploymentJobs().projectId() .ifPresent(id -> object.setLong("projectId", id)); if (application.change().isPresent()) { toSlime(object.setObject("deploying"), application.change()); } if (application.outstandingChange().isPresent()) { toSlime(object.setObject("outstandingChange"), application.outstandingChange()); } List<JobStatus> jobStatus = controller.applications().deploymentTrigger() .steps(application.deploymentSpec()) .sortedJobs(application.deploymentJobs().jobStatus().values()); object.setBool("deployedInternally", application.deploymentJobs().deployedInternally()); 
Cursor deploymentsArray = object.setArray("deploymentJobs");
        for (JobStatus job : jobStatus) {
            Cursor jobObject = deploymentsArray.addObject();
            jobObject.setString("type", job.type().jobName());
            jobObject.setBool("success", job.isSuccess());

            job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
            job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
            job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
            job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
        }

        // Change blockers from the deployment spec: what they block, and the time windows in which they apply.
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        });

        // The oldest platform version this application currently runs on.
        object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());

        // Global rotation URLs and id, when this application has a global DNS name.
        Cursor globalRotationsArray = object.setArray("globalRotations");
        application.globalDnsName(controller.system()).ifPresent(rotation -> {
            globalRotationsArray.addString(rotation.url().toString());
            globalRotationsArray.addString(rotation.secureUrl().toString());
            globalRotationsArray.addString(rotation.oathUrl().toString());
            object.setString("rotationId", application.rotation().get().asString());
        });

        // Deployments, sorted in deployment order per the deployment spec.
        List<Deployment> deployments = controller.applications().deploymentTrigger()
                                                 .steps(application.deploymentSpec())
                                                 .sortedDeployments(application.deployments().values());
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", application.id().instance().value());
            // Rotation status is only serialized for prod deployments of rotated applications.
            if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
                toSlime(application.rotationStatus(deployment), deploymentObject);
            }

            if (recurseOverDeployments(request)) // Include full deployment information when recursive.
                toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
            else
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value() +
                                                           "/instance/" + application.id().instance().value(),
                                                           request.getUri()).toString());
        }

        // Quality metrics and activity for the application as a whole.
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId",
issueId.value()));
    }

    /** Returns the given deployment of the given application instance, or 404 when either does not exist. */
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application = controller.applications().get(id)
                                            .orElseThrow(() -> new NotExistsException(id + " not found"));

        DeploymentId deploymentId = new DeploymentId(application.id(),
                                                     ZoneId.from(environment, region));

        Deployment deployment = application.deployments().get(deploymentId.zoneId());
        if (deployment == null)
            throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());

        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    /** Serializes the platform version and (known) application revision of the given change. */
    private void toSlime(Cursor object, Change change) {
        change.platform().ifPresent(version -> object.setString("version", version.toString()));
        change.application()
              .filter(version -> !version.isUnknown())
              .ifPresent(version -> toSlime(version, object.setObject("revision")));
    }

    /** Serializes the given deployment: endpoints, versions, timestamps, activity, cost and metrics. */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        Cursor serviceUrlArray = response.setArray("serviceUrls");
        controller.applications().getDeploymentEndpoints(deploymentId)
                  .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));

        // URL of the node repository listing for this deployment's nodes.
        response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());

        controller.zoneRegistry().getLogServerUri(deploymentId)
                  .ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString()));

        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.applicationVersion().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        // Expiry is only present for zones which give deployments a time-to-live.
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));

        controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
                  .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.applicationVersion().source(), response);

        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        // Cost of this deployment.
        DeploymentCost appCost = deployment.calculateCost();
        Cursor costObject = response.setObject("cost");
        toSlime(appCost, costObject);

        // Serving metrics for this deployment.
        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (!applicationVersion.isUnknown()) { object.setString("hash", applicationVersion.id()); sourceRevisionToSlime(applicationVersion.source(), object.setObject("source")); } } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private void toSlime(RotationStatus status, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", status.name().toUpperCase()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = ZoneId.from(environment, region); Deployment deployment = application.deployments().get(zone); if (deployment == null) { throw new NotExistsException(application + " has no deployment in " + zone); } Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = getUserPrincipal(request).getIdentity().getFullName(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? 
EndpointStatus.Status.in : EndpointStatus.Status.out;
        EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp);
        controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()), endpointStatus);

        return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                                 application.id().toShortString(),
                                                 deployment.zone().environment().value(),
                                                 deployment.zone().region().value(),
                                                 inService ? "in" : "out of"));
    }

    /** Returns the global rotation override status of each routing endpoint of the given deployment. */
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     ZoneId.from(environment, region));

        Slime slime = new Slime();
        Cursor array = slime.setObject().setArray("globalrotationoverride");
        Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId);
        for (RoutingEndpoint endpoint : status.keySet()) {
            EndpointStatus currentStatus = status.get(endpoint);
            // Each endpoint contributes its upstream name followed by a status object.
            array.addString(endpoint.upstreamName());
            Cursor statusObject = array.addObject();
            statusObject.setString("status", currentStatus.getStatus().name());
            statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
            statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
            statusObject.setLong("timestamp", currentStatus.getEpoch());
        }

        return new SlimeJsonResponse(slime);
    }

    /** Returns the rotation (BCP) status of the given deployment; 404 when there is no rotation or deployment. */
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application = controller.applications().require(applicationId);
        ZoneId zone = ZoneId.from(environment, region);
        if (!application.rotation().isPresent()) {
            throw new NotExistsException("global rotation does not exist for " + application);
        }
        Deployment deployment = application.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(application + " has no deployment in " + zone);
        }

        Slime slime = new Slime();
        Cursor response = slime.setObject();
        toSlime(application.rotationStatus(deployment), response);
        return new SlimeJsonResponse(slime);
    }

    /** Tells whether the given deployment is currently suspended. */
    private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     ZoneId.from(environment, region));
        boolean suspended = controller.applications().isSuspended(deploymentId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setBool("suspended", suspended);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the service view of the given deployment. */
    private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
        ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
                                                             new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<UserId> user = getUserId(request); if ( ! user.isPresent() ) throw new ForbiddenException("Not authenticated or not an user."); String username = UserTenant.normalizeUser(user.get().id()); try { controller.tenants().create(UserTenant.create(username)); return new MessageResponse("Created user '" + username + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<AthenzTenant> tenant = controller.tenants().athenzTenant(TenantName.from(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); Inspector requestData = toSlime(request.getData()).get(); OktaAccessToken token = requireOktaAccessToken(request, "Could not update " + tenantName); controller.tenants().lockOrThrow(tenant.get().name(), lockedTenant -> { lockedTenant = lockedTenant.with(new Property(mandatory("property", requestData).asString())); lockedTenant = controller.tenants().withDomain( lockedTenant, new AthenzDomain(mandatory("athensDomain", requestData).asString()), token ); Optional<PropertyId> propertyId = optional("propertyId", requestData).map(PropertyId::new); if (propertyId.isPresent()) { lockedTenant = lockedTenant.with(propertyId.get()); } controller.tenants().store(lockedTenant); }); return tenant(controller.tenants().requireAthenzTenant(tenant.get().name()), request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { Inspector requestData = toSlime(request.getData()).get(); AthenzTenant tenant = AthenzTenant.create(TenantName.from(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotAthenzDomainAdmin(tenant.domain(), request); controller.tenants().create(tenant, requireOktaAccessToken(request, "Could not create " + tenantName)); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), getOktaAccessToken(request)); } catch (ZmsClientException e) { if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, 
slime.setObject(), request); return new SlimeJsonResponse(slime); } /** * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9", * optionally pinning to that version if. */ private HttpResponse deployPlatform(String tenantName, String applicationName, String versionString, boolean pin) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); StringBuilder response = new StringBuilder(); controller.applications().lockOrThrow(id, application -> { Version version = Version.fromString(versionString); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPin(); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered " + change + " for " + id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. 
*/
    private HttpResponse deployApplication(String tenantName, String applicationName) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        StringBuilder response = new StringBuilder();
        controller.applications().lockOrThrow(id, application -> {
            // NOTE(review): the chained get()/lastSuccess().get() assume a successful component run exists,
            // and throw otherwise — verify callers guard against applications without one.
            Change change = Change.of(application.get().deploymentJobs().statusOf(JobType.component).get().lastSuccess().get().application());
            controller.applications().deploymentTrigger().forceChange(id, change);
            response.append("Triggered " + change + " for " + id);
        });
        return new MessageResponse(response.toString());
    }

    /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
    private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        StringBuilder response = new StringBuilder();
        controller.applications().lockOrThrow(id, application -> {
            Change change = application.get().change();
            if ( ! change.isPresent() && ! change.isPinned()) {
                response.append("No deployment in progress for " + application + " at this time");
                return;
            }

            // 'choice' names a ChangesToCancel constant, case-insensitively.
            ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
            controller.applications().deploymentTrigger().cancelChange(id, cancel);
            response.append("Changed deployment from '" + change + "' to '" + controller.applications().require(id).change() + "' for " + application);
        });

        return new MessageResponse(response.toString());
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     ZoneId.from(environment, region));
        // An optional 'hostname' property restricts the restart to that host.
        Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
        controller.applications().restart(deploymentId, hostname);

        return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
                                                                 ApplicationResource.API_PATH, applicationName,
                                                                 EnvironmentResource.API_PATH, environment,
                                                                 "region", region,
                                                                 "instance", instanceName));
    }

    /** Deletes the given tenant; Athenz tenants additionally require an Okta access token. */
    private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
        Optional<Tenant> tenant = controller.tenants().tenant(tenantName);
        if ( !
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); if (tenant.get() instanceof AthenzTenant) { controller.tenants().deleteTenant((AthenzTenant) tenant.get(), requireOktaAccessToken(request, "Could not delete " + tenantName)); } else if (tenant.get() instanceof UserTenant) { controller.tenants().deleteTenant((UserTenant) tenant.get()); } else { throw new IllegalArgumentException("Unknown tenant type:" + tenant.get().getClass().getSimpleName() + ", for " + tenant.get()); } return tenant(tenant.get(), request, false); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); controller.applications().deleteApplication(id, getOktaAccessToken(request)); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); controller.applications().deactivate(application.id(), ZoneId.from(environment, region)); return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only. */
    private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
        try{
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String sourceEnvironment = chefEnvironment.systemChefEnvironment();
            String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
            controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
            return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
        } catch (Exception e) {
            // Best effort: log and report a 500 rather than propagating Chef client failures.
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
    }

    /** Promote application Chef environments for jobs that deploy applications. */
    private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
        try {
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
            String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
            controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
            return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
        } catch (Exception e) {
            // Best effort: log and report a 500 rather than propagating Chef client failures.
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
    }

    /** Handles job completion reports; component reports are rejected for applications deployed internally. */
    private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
        try {
            DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
            if (   report.jobType() == JobType.component
                && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
                throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
                                                   "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
                                                   "to the old pipeline, please file a ticket at yo/vespa-support and request this.");
            controller.applications().deploymentTrigger().notifyOfCompletion(report);
            return new MessageResponse("ok");
        } catch (IllegalStateException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
    }

    /** Returns a URI which points to an overview badge for the given application. */
    private HttpResponse badge(String tenant, String application, String instance) {
        URI location = controller.jobController().overviewBadge(ApplicationId.from(tenant, application, instance));
        return redirect(location);
    }

    /** Returns a URI which points to a history badge for the given application and job type. */
    private HttpResponse badge(String tenant, String application, String instance, String jobName, String historyLength) {
        URI location = controller.jobController().historicBadge(ApplicationId.from(tenant, application, instance),
                                                                JobType.fromJobName(jobName),
                                                                historyLength == null ?
5 : Math.min(32, Math.max(0, Integer.parseInt(historyLength)))); return redirect(location); } private static HttpResponse redirect(URI location) { HttpResponse httpResponse = new HttpResponse(Response.Status.FOUND) { @Override public void render(OutputStream outputStream) { } }; httpResponse.headers().add("Location", location.toString()); return httpResponse; } private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) { Optional<DeploymentJobs.JobError> jobError = Optional.empty(); if (report.field("jobError").valid()) { jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString())); } ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString()); JobType type = JobType.fromJobName(report.field("jobName").asString()); long buildNumber = report.field("buildNumber").asLong(); if (type == JobType.component) return DeploymentJobs.JobReport.ofComponent(id, report.field("projectId").asLong(), buildNumber, jobError, toSourceRevision(report.field("sourceRevision"))); else return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().tenant(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) { object.setString("tenant", tenant.name().value()); object.setString("type", tentantType(tenant)); if (tenant instanceof 
AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); } /* Only default-instance applications are listed; with recursion they are serialized in full, otherwise as compact entries. */ Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(tenant.name())) { if (application.id().instance().isDefault()) { if (recurseOverApplications(request)) toSlime(applicationArray.addObject(), application, request); else toSlime(application, applicationArray.addObject(), request); } } } /* Athenz tenants additionally expose contact info (property/contacts/issue URLs and persons). */ if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); } } /* Compact tenant entry (name, metadata, URL) used by the tenant list responses. */ private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tentantType(tenant)); if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath,
null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } /* Parses the given string as a long, or returns the default when null; non-numeric input is rejected as an IllegalArgumentException. */ private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } /* Serializes one job run: id, platform version, revision (when known), reason and timestamp. */ private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setLong("id", jobRun.id()); object.setString("version", jobRun.platform().toFullString()); if (!jobRun.application().isUnknown()) toSlime(jobRun.application(), object.setObject("revision")); object.setString("reason", jobRun.reason()); object.setLong("at", jobRun.at().toEpochMilli()); } /* Reads at most 1 MB of JSON from the stream into slime. NOTE(review): the IOException is rethrown as a bare RuntimeException without the cause — consider preserving it. */ private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } /* Throws ForbiddenException unless the calling identity is an admin of the given Athenz domain. */ private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest request) { AthenzIdentity identity = getUserPrincipal(request).getIdentity(); boolean isDomainAdmin = zmsClient.isDomainAdmin(identity, tenantDomain); if ( !
isDomainAdmin) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athenz domain '%s'", identity.getFullName(), tenantDomain.getName())); } } /* Returns the authenticated user id, or empty when the principal's identity is not an AthenzUser. */ private static Optional<UserId> getUserId(HttpRequest request) { return Optional.of(getUserPrincipal(request)) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast) .map(AthenzUser::getName) .map(UserId::new); } /* Requires an AthenzPrincipal on the request; a missing or differently-typed principal is treated as a server error. */ private static AthenzPrincipal getUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new InternalServerErrorException("Expected a user principal"); if (!(principal instanceof AthenzPrincipal)) throw new InternalServerErrorException( String.format("Expected principal of type %s, got %s", AthenzPrincipal.class.getSimpleName(), principal.getClass().getName())); return (AthenzPrincipal) principal; } /* Returns the named field, failing when it is absent or invalid. */ private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } /* Returns the named field as an optional string. */ private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } /* Joins path elements with '/'. */ private static String path(Object...
elements) { return Joiner.on("/").join(elements); } /* Compact application entry: application name, instance, and its /application/v4 URL. */ private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } /* Serializes a deploy activation result: revision id, zip size, prepare log messages, and config change actions (restart and refeed). */ private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } /* Serializes each service info as an object with name, type, configId and host. */ private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } /* Adds each string to the given slime array. */ private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } /* Reads the whole stream as one string (Scanner with the \\A whole-input delimiter), or null when the stream is empty. */ private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( !
scanner.hasNext()) return null; return scanner.next(); } /* True when the given platform version exists in this system's version status. */ private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } /* Serializes a deployment cost summary with a per-cluster breakdown. */ public static void toSlime(DeploymentCost deploymentCost, Cursor object) { object.setLong("tco", (long)deploymentCost.getTco()); object.setLong("waste", (long)deploymentCost.getWaste()); object.setDouble("utilization", deploymentCost.getUtilization()); Cursor clustersObject = object.setObject("cluster"); for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet()) toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey())); } /* Serializes one cluster's cost, flavor, and utilization figures. NOTE(review): tco/waste are narrowed with (int) here but (long) in the deployment-level method above — confirm the narrowing is intentional. */ private static void toSlime(ClusterCost clusterCost, Cursor object) { object.setLong("count", clusterCost.getClusterInfo().getHostnames().size()); object.setString("resource", getResourceName(clusterCost.getResultUtilization())); object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization()); object.setLong("tco", (int)clusterCost.getTco()); object.setLong("waste", (int)clusterCost.getWaste()); object.setString("flavor", clusterCost.getClusterInfo().getFlavor()); object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost()); object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU()); object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem()); object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk()); object.setString("type", clusterCost.getClusterInfo().getClusterType().name()); Cursor utilObject = object.setObject("util"); utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu()); utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory()); utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk()); utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy()); Cursor usageObject = object.setObject("usage");
usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu()); usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory()); usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk()); usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy()); Cursor hostnamesArray = object.setArray("hostnames"); for (String hostname : clusterCost.getClusterInfo().getHostnames()) hostnamesArray.addString(hostname); } /* Names the resource whose utilization equals the max; defaults to "cpu" when none of mem/disk/diskbusy match. */ private static String getResourceName(ClusterUtilization utilization) { String name = "cpu"; double max = utilization.getMaxUtilization(); if (utilization.getMemory() == max) { name = "mem"; } else if (utilization.getDisk() == max) { name = "disk"; } else if (utilization.getDiskBusy() == max) { name = "diskbusy"; } return name; } /* The "recursive" request property selects expansion depth; each level implies the one below it (tenant > application > deployment). */ private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } /* Maps a tenant instance to its API type string. NOTE(review): method name carries a typo ("tentant"); kept, since callers throughout this file use it. */ private static String tentantType(Tenant tenant) { if (tenant instanceof AthenzTenant) { return "ATHENS"; } else if (tenant instanceof UserTenant) { return "USER"; } throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName()); } /* Returns the Okta access token from the request context, or fails with the given message. */ private static OktaAccessToken requireOktaAccessToken(HttpRequest request, String message) { return getOktaAccessToken(request) .orElseThrow(() -> new IllegalArgumentException(message + ": No Okta Access Token provided")); } private static Optional<OktaAccessToken> getOktaAccessToken(HttpRequest request) { return Optional.ofNullable(request.getJDiscRequest().context().get("okta.access-token")) .map(attribute -> new
OktaAccessToken((String) attribute)); } /* Path-parameter helpers for .../tenant/{tenant}/application/{application}/instance/{instance}[/job/{jobtype}[/run/{number}]] routes. */ private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private static JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype")); } private static RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } /* Handles a multipart application submission: parses submit options, verifies the package's identity configuration against the caller, then registers application and test zips with the job controller. projectId is floored at 1. */ private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = new MultipartParser().parse(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); SourceRevision sourceRevision = toSourceRevision(submitOptions); String authorEmail = submitOptions.field("authorEmail").asString(); long projectId = Math.max(1, submitOptions.field("projectId").asLong()); byte[] applicationZip = dataParts.get(EnvironmentResource.APPLICATION_ZIP); controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant), new ApplicationPackage(applicationZip), Optional.of(getUserPrincipal(request).getIdentity())); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application, sourceRevision, authorEmail, projectId, applicationZip, dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP)); } }
/* REST handler for the /application/v4 API. Dispatches on HTTP method and maps domain exceptions to appropriate HTTP error responses. */ class ApplicationApiHandler extends LoggingRequestHandler { private final Controller controller; private final ZmsClientFacade zmsClient; @Inject public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, AthenzClientFactory athenzClientFactory) { super(parentCtx); this.controller = controller; this.zmsClient = new ZmsClientFacade(athenzClientFactory.createZmsClient(), athenzClientFactory.getControllerIdentity()); } /* Generous timeout: some operations handled here (e.g. deployments) are slow. */ @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return handlePOST(request); case DELETE: return handleDELETE(request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return ErrorResponse.from(e); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } /* GET routing: first-match wins, most specific paths later in the chain. */ private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines(); if
/* GET routes: tenant/application lookups, deployment logs, badges, and job run details. */ (path.matches("/application/v4/athensDomain")) return athenzDomains(request); if (path.matches("/application/v4/property")) return properties(); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.getUri().getQuery()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/badge")) return badge(path.get("tenant"), path.get("application"), path.get("instance")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/badge/{job}")) return badge(path.get("tenant"), path.get("application"), path.get("instance"), path.get("job"), request.getProperty("historyLength")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if
/* GET routes over a concrete deployment: status, suspension, services, and global rotation. */ (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } /* PUT routing: user/tenant updates and rotation override (false = set, not clear). */ private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if
(path.matches("/application/v4/user")) return createUser(request); if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } /* POST routing: creation, platform/application deploys (optionally pinned), job reports and submissions. */ private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), readToString(request.getData()), false); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), readToString(request.getData()), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if
/* POST routes for job control and deployment actions on a concrete zone/instance. */ (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request); return ErrorResponse.notFoundError("Nothing at " + path); } /* DELETE routing: tenant/application removal, cancelling deploys, unregistering, aborting jobs, deactivation. */ private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if
(path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } /* Advertises the supported methods for CORS/preflight purposes. */ private HttpResponse handleOPTIONS() { EmptyJsonResponse response = new EmptyJsonResponse(); response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS"); return response; } /* Fully serializes every tenant (recursive root listing). */ private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) toSlime(tenantArray.addObject(), tenant, request, true); return new SlimeJsonResponse(slime); } /* Root resource: recursive listing when requested, otherwise the list of sub-resources. */ private HttpResponse root(HttpRequest request) { return recurseOverTenants(request)
? recursiveRoot(request) : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property"); } /* Returns the calling user (or the userOverride property) and the tenants it belongs to; "tenantExists" flags whether the user's own UserTenant exists. */ private HttpResponse authenticatedUser(HttpRequest request) { String userIdString = request.getProperty("userOverride"); if (userIdString == null) userIdString = getUserId(request) .map(UserId::id) .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride")); UserId userId = new UserId(userIdString); List<Tenant> tenants = controller.tenants().asList(userId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setString("user", userId.id()); Cursor tenantsArray = response.setArray("tenants"); for (Tenant tenant : tenants) tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()); response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant instanceof UserTenant && ((UserTenant) tenant).is(userId.id()))); return new SlimeJsonResponse(slime); } /* Lists all tenants as compact entries. */ private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } /** Lists the screwdriver project id for each application */ private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( !
/* Applications without a Screwdriver project id are skipped. */ application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } /* Lists Athenz domains, optionally filtered by prefix. */ private HttpResponse athenzDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athenzDomain.getName()); } return new SlimeJsonResponse(slime); } /* Lists all known properties with their ids. */ private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } /* Returns the named tenant fully serialized (including applications), or 404. */ private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().tenant(TenantName.from(tenantName)) .map(tenant -> tenant(tenant, request, true)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, request, listApplications); return new SlimeJsonResponse(slime); } /* Lists a tenant's applications as compact entries. */ private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array =
slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } /* Returns the default instance of the named application fully serialized, or throws NotExistsException. */ private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); Slime slime = new Slime(); toSlime(slime.setObject(), application, request); return new SlimeJsonResponse(slime); } /* Fetches logs for a deployment from the config server, passing the raw query string through as parameters; an absent result yields an empty object. */ private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, String query) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = ZoneId.from(environment, region); DeploymentId deployment = new DeploymentId(application, zone); HashMap<String, String> queryParameters = getParameters(query); Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters); Slime slime = new Slime(); Cursor object = slime.setObject(); if (response.isPresent()) { response.get().logs().entrySet().stream().forEach(entry -> object.setString(entry.getKey(), entry.getValue())); } return new SlimeJsonResponse(slime); } /* Force-triggers a job and reports which job(s) were actually triggered, attributing the trigger to the calling principal. */ private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { String triggered = controller.applications().deploymentTrigger() .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName()) .stream().map(JobType::jobName).collect(joining(", ")); return new MessageResponse(triggered.isEmpty() ?
"Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id); } /* Pauses the given job for the trigger's maximum pause duration. */ private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); } /* Splits a raw query string into key/value pairs. NOTE(review): assumes every pair contains '='; a bare key would throw ArrayIndexOutOfBoundsException — confirm callers always pass key=value pairs, and note values are not URL-decoded. */ private HashMap<String, String> getParameters(String query) { HashMap<String, String> keyValPair = new HashMap<>(); Arrays.stream(query.split("&")).forEach(pair -> { String[] splitPair = pair.split("="); keyValPair.put(splitPair[0], splitPair[1]); }); return keyValPair; } /* Full application serialization: identity, job links, source revision, pending/outstanding changes, and sorted job statuses. */ private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + application.id().instance().value() + "/job/", request.getUri()).toString()); application.deploymentJobs().statusOf(JobType.component) .flatMap(status -> status.lastSuccess()) .map(run -> run.application().source()) .ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source"))); application.deploymentJobs().projectId() .ifPresent(id -> object.setLong("projectId", id)); if (application.change().isPresent()) { toSlime(object.setObject("deploying"), application.change()); } if (application.outstandingChange().isPresent()) { toSlime(object.setObject("outstandingChange"), application.outstandingChange()); } List<JobStatus> jobStatus = controller.applications().deploymentTrigger() .steps(application.deploymentSpec()) .sortedJobs(application.deploymentJobs().jobStatus().values()); object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
Cursor deploymentsArray = object.setArray("deploymentJobs"); for (JobStatus job : jobStatus) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().jobName()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); }); object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString()); Cursor globalRotationsArray = object.setArray("globalRotations"); application.globalDnsName(controller.system()).ifPresent(rotation -> { globalRotationsArray.addString(rotation.url().toString()); globalRotationsArray.addString(rotation.secureUrl().toString()); globalRotationsArray.addString(rotation.oathUrl().toString()); object.setString("rotationId", application.rotation().get().asString()); }); List<Deployment> deployments = controller.applications().deploymentTrigger() .steps(application.deploymentSpec()) .sortedDeployments(application.deployments().values()); Cursor 
instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) { toSlime(application.rotationStatus(deployment), deploymentObject); } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request); else deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", 
issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region)); Deployment deployment = application.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Change change) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.application() .filter(version -> !version.isUnknown()) .ifPresent(version -> toSlime(version, object.setObject("revision"))); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { Cursor serviceUrlArray = response.setArray("serviceUrls"); controller.applications().getDeploymentEndpoints(deploymentId) .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString()))); response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." 
+ deploymentId.applicationId().instance(), request.getUri()).toString()); controller.zoneRegistry().getLogServerUri(deploymentId) .ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString())); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.applicationVersion().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli())); controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId() .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.applicationVersion().source(), response); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentCost appCost = deployment.calculateCost(); Cursor costObject = response.setObject("cost"); toSlime(appCost, costObject); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); 
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (!applicationVersion.isUnknown()) { object.setString("hash", applicationVersion.id()); sourceRevisionToSlime(applicationVersion.source(), object.setObject("source")); } } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private void toSlime(RotationStatus status, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", status.name().toUpperCase()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = ZoneId.from(environment, region); Deployment deployment = application.deployments().get(zone); if (deployment == null) { throw new NotExistsException(application + " has no deployment in " + zone); } Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = getUserPrincipal(request).getIdentity().getFullName(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? 
EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp); controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()), endpointStatus); return new MessageResponse(String.format("Successfully set %s in %s.%s %s service", application.id().toShortString(), deployment.zone().environment().value(), deployment.zone().region().value(), inService ? "in" : "out of")); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId); for (RoutingEndpoint endpoint : status.keySet()) { EndpointStatus currentStatus = status.get(endpoint); array.addString(endpoint.upstreamName()); Cursor statusObject = array.addObject(); statusObject.setString("status", currentStatus.getStatus().name()); statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); statusObject.setString("agent", currentStatus.getAgent() == null ? 
"" : currentStatus.getAgent()); statusObject.setLong("timestamp", currentStatus.getEpoch()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().require(applicationId); ZoneId zone = ZoneId.from(environment, region); if (!application.rotation().isPresent()) { throw new NotExistsException("global rotation does not exist for " + application); } Deployment deployment = application.deployments().get(zone); if (deployment == null) { throw new NotExistsException(application + " has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(application.rotationStatus(deployment), response); return new SlimeJsonResponse(slime); } private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), 
controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<UserId> user = getUserId(request); if ( ! user.isPresent() ) throw new ForbiddenException("Not authenticated or not an user."); String username = UserTenant.normalizeUser(user.get().id()); try { controller.tenants().create(UserTenant.create(username)); return new MessageResponse("Created user '" + username + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<AthenzTenant> tenant = controller.tenants().athenzTenant(TenantName.from(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); Inspector requestData = toSlime(request.getData()).get(); OktaAccessToken token = requireOktaAccessToken(request, "Could not update " + tenantName); controller.tenants().lockOrThrow(tenant.get().name(), lockedTenant -> { lockedTenant = lockedTenant.with(new Property(mandatory("property", requestData).asString())); lockedTenant = controller.tenants().withDomain( lockedTenant, new AthenzDomain(mandatory("athensDomain", requestData).asString()), token ); Optional<PropertyId> propertyId = optional("propertyId", requestData).map(PropertyId::new); if (propertyId.isPresent()) { lockedTenant = lockedTenant.with(propertyId.get()); } controller.tenants().store(lockedTenant); }); return tenant(controller.tenants().requireAthenzTenant(tenant.get().name()), request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { Inspector requestData = toSlime(request.getData()).get(); AthenzTenant tenant = AthenzTenant.create(TenantName.from(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotAthenzDomainAdmin(tenant.domain(), request); controller.tenants().create(tenant, requireOktaAccessToken(request, "Could not create " + tenantName)); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), getOktaAccessToken(request)); } catch (ZmsClientException e) { if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, 
slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}

/**
 * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9",
 * optionally pinning to that version if {@code pin} is true.
 */
private HttpResponse deployPlatform(String tenantName, String applicationName, String versionString, boolean pin) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Version version = Version.fromString(versionString);
        // An empty version string means "deploy the current system version".
        if (version.equals(Version.emptyVersion)) version = controller.systemVersion();
        // Refuse versions not active in this system, listing the valid ones.
        if ( ! systemHasVersion(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + controller.versionStatus().versions()
                                                                               .stream()
                                                                               .map(VespaVersion::versionNumber)
                                                                               .map(Version::toString)
                                                                               .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin) change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}

/** Trigger deployment to the last known application package for the given application.
*/
private HttpResponse deployApplication(String tenantName, String applicationName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        // Fail with a descriptive message instead of a bare NoSuchElementException when
        // there is no component job status, or no successful build of it, to deploy.
        Change change = application.get().deploymentJobs().statusOf(JobType.component)
                                   .flatMap(status -> status.lastSuccess())
                                   .map(run -> Change.of(run.application()))
                                   .orElseThrow(() -> new IllegalArgumentException(
                                           "No successful component build to deploy for " + id));
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}

/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change change = application.get().change();
        if ( ! change.isPresent() && !
change.isPinned()) { response.append("No deployment in progress for " + application + " at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '" + change + "' to '" + controller.applications().require(id).change() + "' for " + application); }); return new MessageResponse(response.toString()); } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new); controller.applications().restart(deploymentId, hostname); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(tenantName); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); if (tenant.get() instanceof AthenzTenant) { controller.tenants().deleteTenant((AthenzTenant) tenant.get(), requireOktaAccessToken(request, "Could not delete " + tenantName)); } else if (tenant.get() instanceof UserTenant) { controller.tenants().deleteTenant((UserTenant) tenant.get()); } else { throw new IllegalArgumentException("Unknown tenant type:" + tenant.get().getClass().getSimpleName() + ", for " + tenant.get()); } return tenant(tenant.get(), request, false); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); controller.applications().deleteApplication(id, getOktaAccessToken(request)); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); controller.applications().deactivate(application.id(), ZoneId.from(environment, region)); return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) { try { DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get()); if ( report.jobType() == JobType.component && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally()) throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " + "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " + "to the old pipeline, please file a ticket at yo/vespa-support and request this."); controller.applications().deploymentTrigger().notifyOfCompletion(report); return new MessageResponse("ok"); } catch (IllegalStateException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } } /** Returns a URI which points to an overview badge for the given application. */ private HttpResponse badge(String tenant, String application, String instance) { URI location = controller.jobController().overviewBadge(ApplicationId.from(tenant, application, instance)); return redirect(location); } /** Returns a URI which points to a history badge for the given application and job type. */ private HttpResponse badge(String tenant, String application, String instance, String jobName, String historyLength) { URI location = controller.jobController().historicBadge(ApplicationId.from(tenant, application, instance), JobType.fromJobName(jobName), historyLength == null ? 
5 : Math.min(32, Math.max(0, Integer.parseInt(historyLength)))); return redirect(location); } private static HttpResponse redirect(URI location) { HttpResponse httpResponse = new HttpResponse(Response.Status.FOUND) { @Override public void render(OutputStream outputStream) { } }; httpResponse.headers().add("Location", location.toString()); return httpResponse; } private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) { Optional<DeploymentJobs.JobError> jobError = Optional.empty(); if (report.field("jobError").valid()) { jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString())); } ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString()); JobType type = JobType.fromJobName(report.field("jobName").asString()); long buildNumber = report.field("buildNumber").asLong(); if (type == JobType.component) return DeploymentJobs.JobReport.ofComponent(id, report.field("projectId").asLong(), buildNumber, jobError, toSourceRevision(report.field("sourceRevision"))); else return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().tenant(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) { object.setString("tenant", tenant.name().value()); object.setString("type", tentantType(tenant)); if (tenant instanceof 
AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); } Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(tenant.name())) { if (application.id().instance().isDefault()) { if (recurseOverApplications(request)) toSlime(applicationArray.addObject(), application, request); else toSlime(application, applicationArray.addObject(), request); } } } if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); } } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tentantType(tenant)); if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, 
null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setLong("id", jobRun.id()); object.setString("version", jobRun.platform().toFullString()); if (!jobRun.application().isUnknown()) toSlime(jobRun.application(), object.setObject("revision")); object.setString("reason", jobRun.reason()); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest request) { AthenzIdentity identity = getUserPrincipal(request).getIdentity(); boolean isDomainAdmin = zmsClient.isDomainAdmin(identity, tenantDomain); if ( ! 
isDomainAdmin) {
        throw new ForbiddenException(
                String.format("The user '%s' is not admin in Athenz domain '%s'", identity.getFullName(), tenantDomain.getName()));
    }
}

/** Returns the Athenz user id of the given request, or empty when the principal is not an Athenz user. */
private static Optional<UserId> getUserId(HttpRequest request) {
    return Optional.of(getUserPrincipal(request))
                   .map(AthenzPrincipal::getIdentity)
                   .filter(AthenzUser.class::isInstance)
                   .map(AthenzUser.class::cast)
                   .map(AthenzUser::getName)
                   .map(UserId::new);
}

/** Returns the principal of the given request, failing with 500 when it is missing or not an AthenzPrincipal. */
private static AthenzPrincipal getUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null) throw new InternalServerErrorException("Expected a user principal");
    if (!(principal instanceof AthenzPrincipal))
        throw new InternalServerErrorException(
                String.format("Expected principal of type %s, got %s",
                              AthenzPrincipal.class.getSimpleName(), principal.getClass().getName()));
    return (AthenzPrincipal) principal;
}

/** Returns the given field of the inspector, or throws IllegalArgumentException when it is missing. */
private Inspector mandatory(String key, Inspector object) {
    if ( ! object.field(key).valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return object.field(key);
}

/** Returns the given field as a string, or empty when it is not present. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}

/** Joins the given path elements with '/'. */
private static String path(Object...
elements) { return Joiner.on("/").join(elements); } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); 
refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } public static void toSlime(DeploymentCost deploymentCost, Cursor object) { object.setLong("tco", (long)deploymentCost.getTco()); object.setLong("waste", (long)deploymentCost.getWaste()); object.setDouble("utilization", deploymentCost.getUtilization()); Cursor clustersObject = object.setObject("cluster"); for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet()) toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey())); } private static void toSlime(ClusterCost clusterCost, Cursor object) { object.setLong("count", clusterCost.getClusterInfo().getHostnames().size()); object.setString("resource", getResourceName(clusterCost.getResultUtilization())); object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization()); object.setLong("tco", (int)clusterCost.getTco()); object.setLong("waste", (int)clusterCost.getWaste()); object.setString("flavor", clusterCost.getClusterInfo().getFlavor()); object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost()); object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU()); object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem()); object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk()); object.setString("type", clusterCost.getClusterInfo().getClusterType().name()); Cursor utilObject = object.setObject("util"); utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu()); utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory()); utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk()); utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy()); Cursor usageObject = object.setObject("usage"); 
usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu()); usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory()); usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk()); usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy()); Cursor hostnamesArray = object.setArray("hostnames"); for (String hostname : clusterCost.getClusterInfo().getHostnames()) hostnamesArray.addString(hostname); } private static String getResourceName(ClusterUtilization utilization) { String name = "cpu"; double max = utilization.getMaxUtilization(); if (utilization.getMemory() == max) { name = "mem"; } else if (utilization.getDisk() == max) { name = "disk"; } else if (utilization.getDiskBusy() == max) { name = "diskbusy"; } return name; } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static String tentantType(Tenant tenant) { if (tenant instanceof AthenzTenant) { return "ATHENS"; } else if (tenant instanceof UserTenant) { return "USER"; } throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName()); } private static OktaAccessToken requireOktaAccessToken(HttpRequest request, String message) { return getOktaAccessToken(request) .orElseThrow(() -> new IllegalArgumentException(message + ": No Okta Access Token provided")); } private static Optional<OktaAccessToken> getOktaAccessToken(HttpRequest request) { return Optional.ofNullable(request.getJDiscRequest().context().get("okta.access-token")) .map(attribute -> new 
OktaAccessToken((String) attribute)); } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private static JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype")); } private static RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = new MultipartParser().parse(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); SourceRevision sourceRevision = toSourceRevision(submitOptions); String authorEmail = submitOptions.field("authorEmail").asString(); long projectId = Math.max(1, submitOptions.field("projectId").asLong()); byte[] applicationZip = dataParts.get(EnvironmentResource.APPLICATION_ZIP); controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant), new ApplicationPackage(applicationZip), Optional.of(getUserPrincipal(request).getIdentity())); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application, sourceRevision, authorEmail, projectId, applicationZip, dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP)); } }
Note: `Deployment.applicationVersion()` also never returns `null`; an absent version is represented by the `ApplicationVersion.isUnknown()` sentinel, so guarding with `Optional.ofNullable(...).isPresent()` cannot detect it.
/**
 * Deploys an application package to the given zone, or — when deploying directly with no
 * package, version or Vespa version given — redeploys whatever is currently deployed there.
 *
 * @param tenantName      tenant owning the application
 * @param applicationName application to deploy
 * @param instanceName    instance to deploy
 * @param environment     target environment of the zone
 * @param region          target region of the zone
 * @param request         multipart request carrying the mandatory 'deployOptions' JSON part
 *                        and an optional 'applicationZip' part
 * @return a JSON response describing the activation result
 * @throws IllegalArgumentException on inconsistent or insufficient deploy options
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName,
                            String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);

    Map<String, byte[]> dataParts = new MultipartParser().parse(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");

    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    // The zip part is optional: absent when redeploying or deploying a previously built version.
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);

    // Source revision and build number identify a previously submitted build; they must come as a pair.
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");

    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");

        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(
                controller.applications().require(applicationId), applicationVersion.get()));
    }

    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);

    // Direct deployment with nothing specified means: redeploy what is already in this zone.
    if (deployDirectly && ! applicationPackage.isPresent() && ! applicationVersion.isPresent() && ! vespaVersion.isPresent()) {
        Optional<Deployment> deployment = controller.applications().get(applicationId)
                                                    .map(Application::deployments)
                                                    .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
        if ( ! deployment.isPresent())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");

        // applicationVersion() never returns null — absence is the "unknown" sentinel, so test
        // isUnknown() instead of wrapping in Optional.ofNullable (which would never be empty).
        ApplicationVersion version = deployment.get().applicationVersion();
        if (version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");

        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(
                controller.applications().require(applicationId), applicationVersion.get()));
    }

    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());
    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass,
                                                             Optional.of(getUserPrincipal(request).getIdentity()));
    return new SlimeJsonResponse(toSlime(result));
}
vespaVersion = Optional.ofNullable(deployment.get().version());
/**
 * Deploys an application package to the given zone, or — when deploying directly with no
 * package, version or Vespa version given — redeploys whatever is currently deployed there.
 *
 * @param tenantName      tenant owning the application
 * @param applicationName application to deploy
 * @param instanceName    instance to deploy
 * @param environment     target environment of the zone
 * @param region          target region of the zone
 * @param request         multipart request carrying the mandatory 'deployOptions' JSON part
 *                        and an optional 'applicationZip' part
 * @return a JSON response describing the activation result
 * @throws IllegalArgumentException on inconsistent or insufficient deploy options
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName,
                            String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);

    Map<String, byte[]> dataParts = new MultipartParser().parse(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");

    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

    // The zip part is optional: absent when redeploying or deploying a previously built version.
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);

    // Source revision and build number identify a previously submitted build; they must come as a pair.
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");

    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");

        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                 buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
    }

    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);

    // Direct deployment with nothing specified means: redeploy what is already in this zone.
    if (deployDirectly && ! applicationPackage.isPresent() && ! applicationVersion.isPresent() && ! vespaVersion.isPresent()) {
        Optional<Deployment> deployment = controller.applications().get(applicationId)
                                                    .map(Application::deployments)
                                                    .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
        if ( ! deployment.isPresent())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");

        // applicationVersion() does not return null; the "no version" state is the unknown sentinel.
        ApplicationVersion version = deployment.get().applicationVersion();
        if (version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");

        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
    }

    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());
    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass,
                                                             Optional.of(getUserPrincipal(request).getIdentity()));
    return new SlimeJsonResponse(toSlime(result));
}
class ApplicationApiHandler extends LoggingRequestHandler { private final Controller controller; private final ZmsClientFacade zmsClient; @Inject public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, AthenzClientFactory athenzClientFactory) { super(parentCtx); this.controller = controller; this.zmsClient = new ZmsClientFacade(athenzClientFactory.createZmsClient(), athenzClientFactory.getControllerIdentity()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return handlePOST(request); case DELETE: return handleDELETE(request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return ErrorResponse.from(e); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines(); if 
(path.matches("/application/v4/athensDomain")) return athenzDomains(request); if (path.matches("/application/v4/property")) return properties(); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.getUri().getQuery()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/badge")) return badge(path.get("tenant"), path.get("application"), path.get("instance")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/badge/{job}")) return badge(path.get("tenant"), path.get("application"), path.get("instance"), path.get("job"), request.getProperty("historyLength")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if 
(path.matches("/application/v4/user")) return createUser(request); if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), readToString(request.getData()), false); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), readToString(request.getData()), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyJsonResponse response = new EmptyJsonResponse(); response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) toSlime(tenantArray.addObject(), tenant, request, true); return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return recurseOverTenants(request) 
? recursiveRoot(request) : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property"); } private HttpResponse authenticatedUser(HttpRequest request) { String userIdString = request.getProperty("userOverride"); if (userIdString == null) userIdString = getUserId(request) .map(UserId::id) .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride")); UserId userId = new UserId(userIdString); List<Tenant> tenants = controller.tenants().asList(userId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setString("user", userId.id()); Cursor tenantsArray = response.setArray("tenants"); for (Tenant tenant : tenants) tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()); response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant instanceof UserTenant && ((UserTenant) tenant).is(userId.id()))); return new SlimeJsonResponse(slime); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } /** Lists the screwdriver project id for each application */ private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! 
application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } private HttpResponse athenzDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athenzDomain.getName()); } return new SlimeJsonResponse(slime); } private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().tenant(TenantName.from(tenantName)) .map(tenant -> tenant(tenant, request, true)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, request, listApplications); return new SlimeJsonResponse(slime); } private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array = 
slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default"); Application application = controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); Slime slime = new Slime(); toSlime(slime.setObject(), application, request); return new SlimeJsonResponse(slime); } private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, String query) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = ZoneId.from(environment, region); DeploymentId deployment = new DeploymentId(application, zone); HashMap<String, String> queryParameters = getParameters(query); Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters); Slime slime = new Slime(); Cursor object = slime.setObject(); if (response.isPresent()) { response.get().logs().entrySet().stream().forEach(entry -> object.setString(entry.getKey(), entry.getValue())); } return new SlimeJsonResponse(slime); } private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { String triggered = controller.applications().deploymentTrigger() .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName()) .stream().map(JobType::jobName).collect(joining(", ")); return new MessageResponse(triggered.isEmpty() ? 
"Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id); } private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); } private HashMap<String, String> getParameters(String query) { HashMap<String, String> keyValPair = new HashMap<>(); Arrays.stream(query.split("&")).forEach(pair -> { String[] splitPair = pair.split("="); keyValPair.put(splitPair[0], splitPair[1]); }); return keyValPair; } private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + application.id().instance().value() + "/job/", request.getUri()).toString()); application.deploymentJobs().statusOf(JobType.component) .flatMap(status -> status.lastSuccess()) .map(run -> run.application().source()) .ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source"))); application.deploymentJobs().projectId() .ifPresent(id -> object.setLong("projectId", id)); if (application.change().isPresent()) { toSlime(object.setObject("deploying"), application.change()); } if (application.outstandingChange().isPresent()) { toSlime(object.setObject("outstandingChange"), application.outstandingChange()); } List<JobStatus> jobStatus = controller.applications().deploymentTrigger() .steps(application.deploymentSpec()) .sortedJobs(application.deploymentJobs().jobStatus().values()); object.setBool("deployedInternally", application.deploymentJobs().deployedInternally()); 
Cursor deploymentsArray = object.setArray("deploymentJobs"); for (JobStatus job : jobStatus) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().jobName()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); }); object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString()); Cursor globalRotationsArray = object.setArray("globalRotations"); application.globalDnsName(controller.system()).ifPresent(rotation -> { globalRotationsArray.addString(rotation.url().toString()); globalRotationsArray.addString(rotation.secureUrl().toString()); globalRotationsArray.addString(rotation.oathUrl().toString()); object.setString("rotationId", application.rotation().get().asString()); }); List<Deployment> deployments = controller.applications().deploymentTrigger() .steps(application.deploymentSpec()) .sortedDeployments(application.deployments().values()); Cursor 
instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) { toSlime(application.rotationStatus(deployment), deploymentObject); } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request); else deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", 
issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region)); Deployment deployment = application.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Change change) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.application() .filter(version -> !version.isUnknown()) .ifPresent(version -> toSlime(version, object.setObject("revision"))); } private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { Cursor serviceUrlArray = response.setArray("serviceUrls"); controller.applications().getDeploymentEndpoints(deploymentId) .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString()))); response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." 
+ deploymentId.applicationId().instance(), request.getUri()).toString()); controller.zoneRegistry().getLogServerUri(deploymentId) .ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString())); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.applicationVersion().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli())); controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId() .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.applicationVersion().source(), response); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); DeploymentCost appCost = deployment.calculateCost(); Cursor costObject = response.setObject("cost"); toSlime(appCost, costObject); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); 
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (!applicationVersion.isUnknown()) { object.setString("hash", applicationVersion.id()); sourceRevisionToSlime(applicationVersion.source(), object.setObject("source")); } } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private void toSlime(RotationStatus status, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", status.name().toUpperCase()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = ZoneId.from(environment, region); Deployment deployment = application.deployments().get(zone); if (deployment == null) { throw new NotExistsException(application + " has no deployment in " + zone); } Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = getUserPrincipal(request).getIdentity().getFullName(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? 
EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp); controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()), endpointStatus); return new MessageResponse(String.format("Successfully set %s in %s.%s %s service", application.id().toShortString(), deployment.zone().environment().value(), deployment.zone().region().value(), inService ? "in" : "out of")); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId); for (RoutingEndpoint endpoint : status.keySet()) { EndpointStatus currentStatus = status.get(endpoint); array.addString(endpoint.upstreamName()); Cursor statusObject = array.addObject(); statusObject.setString("status", currentStatus.getStatus().name()); statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); statusObject.setString("agent", currentStatus.getAgent() == null ? 
"" : currentStatus.getAgent()); statusObject.setLong("timestamp", currentStatus.getEpoch()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().require(applicationId); ZoneId zone = ZoneId.from(environment, region); if (!application.rotation().isPresent()) { throw new NotExistsException("global rotation does not exist for " + application); } Deployment deployment = application.deployments().get(zone); if (deployment == null) { throw new NotExistsException(application + " has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(application.rotationStatus(deployment), response); return new SlimeJsonResponse(slime); } private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), 
controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<UserId> user = getUserId(request); if ( ! user.isPresent() ) throw new ForbiddenException("Not authenticated or not an user."); String username = UserTenant.normalizeUser(user.get().id()); try { controller.tenants().create(UserTenant.create(username)); return new MessageResponse("Created user '" + username + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<AthenzTenant> tenant = controller.tenants().athenzTenant(TenantName.from(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); Inspector requestData = toSlime(request.getData()).get(); OktaAccessToken token = requireOktaAccessToken(request, "Could not update " + tenantName); controller.tenants().lockOrThrow(tenant.get().name(), lockedTenant -> { lockedTenant = lockedTenant.with(new Property(mandatory("property", requestData).asString())); lockedTenant = controller.tenants().withDomain( lockedTenant, new AthenzDomain(mandatory("athensDomain", requestData).asString()), token ); Optional<PropertyId> propertyId = optional("propertyId", requestData).map(PropertyId::new); if (propertyId.isPresent()) { lockedTenant = lockedTenant.with(propertyId.get()); } controller.tenants().store(lockedTenant); }); return tenant(controller.tenants().requireAthenzTenant(tenant.get().name()), request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { Inspector requestData = toSlime(request.getData()).get(); AthenzTenant tenant = AthenzTenant.create(TenantName.from(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotAthenzDomainAdmin(tenant.domain(), request); controller.tenants().create(tenant, requireOktaAccessToken(request, "Could not create " + tenantName)); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), getOktaAccessToken(request)); } catch (ZmsClientException e) { if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, 
slime.setObject(), request); return new SlimeJsonResponse(slime); } /** * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9", * optionally pinning to that version if. */ private HttpResponse deployPlatform(String tenantName, String applicationName, String versionString, boolean pin) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); StringBuilder response = new StringBuilder(); controller.applications().lockOrThrow(id, application -> { Version version = Version.fromString(versionString); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPin(); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered " + change + " for " + id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. 
*/ private HttpResponse deployApplication(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); StringBuilder response = new StringBuilder(); controller.applications().lockOrThrow(id, application -> { Change change = Change.of(application.get().deploymentJobs().statusOf(JobType.component).get().lastSuccess().get().application()); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered " + change + " for " + id); }); return new MessageResponse(response.toString()); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); StringBuilder response = new StringBuilder(); controller.applications().lockOrThrow(id, application -> { Change change = application.get().change(); if ( ! change.isPresent() && ! 
change.isPinned()) { response.append("No deployment in progress for " + application + " at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '" + change + "' to '" + controller.applications().require(id).change() + "' for " + application); }); return new MessageResponse(response.toString()); } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new); controller.applications().restart(deploymentId, hostname); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(tenantName); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); if (tenant.get() instanceof AthenzTenant) { controller.tenants().deleteTenant((AthenzTenant) tenant.get(), requireOktaAccessToken(request, "Could not delete " + tenantName)); } else if (tenant.get() instanceof UserTenant) { controller.tenants().deleteTenant((UserTenant) tenant.get()); } else { throw new IllegalArgumentException("Unknown tenant type:" + tenant.get().getClass().getSimpleName() + ", for " + tenant.get()); } return tenant(tenant.get(), request, false); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); controller.applications().deleteApplication(id, getOktaAccessToken(request)); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); controller.applications().deactivate(application.id(), ZoneId.from(environment, region)); return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) { try { DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get()); if ( report.jobType() == JobType.component && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally()) throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " + "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " + "to the old pipeline, please file a ticket at yo/vespa-support and request this."); controller.applications().deploymentTrigger().notifyOfCompletion(report); return new MessageResponse("ok"); } catch (IllegalStateException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } } /** Returns a URI which points to an overview badge for the given application. */ private HttpResponse badge(String tenant, String application, String instance) { URI location = controller.jobController().overviewBadge(ApplicationId.from(tenant, application, instance)); return redirect(location); } /** Returns a URI which points to a history badge for the given application and job type. */ private HttpResponse badge(String tenant, String application, String instance, String jobName, String historyLength) { URI location = controller.jobController().historicBadge(ApplicationId.from(tenant, application, instance), JobType.fromJobName(jobName), historyLength == null ? 
5 : Math.min(32, Math.max(0, Integer.parseInt(historyLength)))); return redirect(location); } private static HttpResponse redirect(URI location) { HttpResponse httpResponse = new HttpResponse(Response.Status.FOUND) { @Override public void render(OutputStream outputStream) { } }; httpResponse.headers().add("Location", location.toString()); return httpResponse; } private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) { Optional<DeploymentJobs.JobError> jobError = Optional.empty(); if (report.field("jobError").valid()) { jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString())); } ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString()); JobType type = JobType.fromJobName(report.field("jobName").asString()); long buildNumber = report.field("buildNumber").asLong(); if (type == JobType.component) return DeploymentJobs.JobReport.ofComponent(id, report.field("projectId").asLong(), buildNumber, jobError, toSourceRevision(report.field("sourceRevision"))); else return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().tenant(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) { object.setString("tenant", tenant.name().value()); object.setString("type", tentantType(tenant)); if (tenant instanceof 
AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); } Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(tenant.name())) { if (application.id().instance().isDefault()) { if (recurseOverApplications(request)) toSlime(applicationArray.addObject(), application, request); else toSlime(application, applicationArray.addObject(), request); } } } if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); } } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tentantType(tenant)); if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, 
null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setLong("id", jobRun.id()); object.setString("version", jobRun.platform().toFullString()); if (!jobRun.application().isUnknown()) toSlime(jobRun.application(), object.setObject("revision")); object.setString("reason", jobRun.reason()); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest request) { AthenzIdentity identity = getUserPrincipal(request).getIdentity(); boolean isDomainAdmin = zmsClient.isDomainAdmin(identity, tenantDomain); if ( ! 
isDomainAdmin) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athenz domain '%s'", identity.getFullName(), tenantDomain.getName())); } } private static Optional<UserId> getUserId(HttpRequest request) { return Optional.of(getUserPrincipal(request)) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast) .map(AthenzUser::getName) .map(UserId::new); } private static AthenzPrincipal getUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new InternalServerErrorException("Expected a user principal"); if (!(principal instanceof AthenzPrincipal)) throw new InternalServerErrorException( String.format("Expected principal of type %s, got %s", AthenzPrincipal.class.getSimpleName(), principal.getClass().getName())); return (AthenzPrincipal) principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); 
refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } public static void toSlime(DeploymentCost deploymentCost, Cursor object) { object.setLong("tco", (long)deploymentCost.getTco()); object.setLong("waste", (long)deploymentCost.getWaste()); object.setDouble("utilization", deploymentCost.getUtilization()); Cursor clustersObject = object.setObject("cluster"); for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet()) toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey())); } private static void toSlime(ClusterCost clusterCost, Cursor object) { object.setLong("count", clusterCost.getClusterInfo().getHostnames().size()); object.setString("resource", getResourceName(clusterCost.getResultUtilization())); object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization()); object.setLong("tco", (int)clusterCost.getTco()); object.setLong("waste", (int)clusterCost.getWaste()); object.setString("flavor", clusterCost.getClusterInfo().getFlavor()); object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost()); object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU()); object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem()); object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk()); object.setString("type", clusterCost.getClusterInfo().getClusterType().name()); Cursor utilObject = object.setObject("util"); utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu()); utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory()); utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk()); utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy()); Cursor usageObject = object.setObject("usage"); 
usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
        usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
        usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
        usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
        Cursor hostnamesArray = object.setArray("hostnames");
        for (String hostname : clusterCost.getClusterInfo().getHostnames())
            hostnamesArray.addString(hostname);
    }

    /** Returns the name of the resource ("cpu", "mem", "disk" or "diskbusy") with the highest utilization. */
    private static String getResourceName(ClusterUtilization utilization) {
        String name = "cpu"; // default when no other resource equals the max
        double max = utilization.getMaxUtilization();
        if (utilization.getMemory() == max) {
            name = "mem";
        } else if (utilization.getDisk() == max) {
            name = "disk";
        } else if (utilization.getDiskBusy() == max) {
            name = "diskbusy";
        }
        return name;
    }

    /** Returns whether the request asks for recursion over tenants (or deeper). */
    private static boolean recurseOverTenants(HttpRequest request) {
        return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
    }

    /** Returns whether the request asks for recursion over applications (or deeper). */
    private static boolean recurseOverApplications(HttpRequest request) {
        return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
    }

    /** Returns whether the request asks for recursion over deployments. */
    private static boolean recurseOverDeployments(HttpRequest request) {
        return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
    }

    /** Returns the API type string for the given tenant kind. */
    private static String tentantType(Tenant tenant) {
        if (tenant instanceof AthenzTenant) {
            return "ATHENS";
        } else if (tenant instanceof UserTenant) {
            return "USER";
        }
        throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
    }

    /** Returns the request's Okta access token, or throws IllegalArgumentException with the given context message. */
    private static OktaAccessToken requireOktaAccessToken(HttpRequest request, String message) {
        return getOktaAccessToken(request)
                .orElseThrow(() -> new IllegalArgumentException(message + ": No Okta Access Token provided"));
    }

    /** Returns the Okta access token attached to the request context, if any. */
    private static Optional<OktaAccessToken> getOktaAccessToken(HttpRequest request) {
        return Optional.ofNullable(request.getJDiscRequest().context().get("okta.access-token"))
                       .map(attribute -> new
OktaAccessToken((String) attribute));
    }

    /** Parses the tenant, application and instance path segments into an ApplicationId. */
    private static ApplicationId appIdFromPath(Path path) {
        return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
    }

    /** Parses the job type path segment. */
    private static JobType jobTypeFromPath(Path path) {
        return JobType.fromJobName(path.get("jobtype"));
    }

    /** Parses the application id, job type and run number path segments into a RunId. */
    private static RunId runIdFromPath(Path path) {
        long number = Long.parseLong(path.get("number"));
        return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
    }

    /**
     * Handles a multipart submission of an application package and test package: parses the
     * submit options, verifies the package's identity configuration against the submitting user,
     * and registers the submission with the job controller.
     */
    private HttpResponse submit(String tenant, String application, HttpRequest request) {
        Map<String, byte[]> dataParts = new MultipartParser().parse(request);
        Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
        SourceRevision sourceRevision = toSourceRevision(submitOptions);
        String authorEmail = submitOptions.field("authorEmail").asString();
        long projectId = Math.max(1, submitOptions.field("projectId").asLong()); // floor at 1; presumably an absent field yields 0 — verify
        byte[] applicationZip = dataParts.get(EnvironmentResource.APPLICATION_ZIP);
        controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                         new ApplicationPackage(applicationZip),
                                                                         Optional.of(getUserPrincipal(request).getIdentity()));
        return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application,
                                                            sourceRevision, authorEmail, projectId, applicationZip,
                                                            dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
    }

}
/** Handler for the /application/v4 REST API: tenants, applications, deployments and jobs. */
class ApplicationApiHandler extends LoggingRequestHandler {

    private final Controller controller;
    private final ZmsClientFacade zmsClient;

    @Inject
    public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AthenzClientFactory athenzClientFactory) {
        super(parentCtx);
        this.controller = controller;
        this.zmsClient = new ZmsClientFacade(athenzClientFactory.createZmsClient(), athenzClientFactory.getControllerIdentity());
    }

    /** Deployments may be slow; allow a generous request timeout. */
    @Override
    public Duration getTimeout() { return Duration.ofMinutes(20); }

    /** Dispatches on HTTP method and maps known exception types to the corresponding HTTP error responses. */
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            switch (request.getMethod()) {
                case GET: return handleGET(request);
                case PUT: return handlePUT(request);
                case POST: return handlePOST(request);
                case DELETE: return handleDELETE(request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotAuthorizedException e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            return ErrorResponse.from(e);
        }
        catch (RuntimeException e) { // anything not mapped above: log and return 500
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    /** Routes GET requests on their path. */
    private HttpResponse handleGET(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
        if
(path.matches("/application/v4/athensDomain")) return athenzDomains(request);
        if (path.matches("/application/v4/property")) return properties();
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.getUri().getQuery());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/badge")) return badge(path.get("tenant"), path.get("application"), path.get("instance"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/badge/{job}")) return badge(path.get("tenant"), path.get("application"), path.get("instance"), path.get("job"), request.getProperty("historyLength"));
        // Job status and run details are delegated to the job controller helper
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        // Deployment (zone)-scoped resources
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PUT requests on their path. */
    private HttpResponse handlePUT(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if
(path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        // false = take the rotation back in service
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes POST requests on their path. */
    private HttpResponse handlePOST(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), readToString(request.getData()), false);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), readToString(request.getData()), true);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes DELETE requests on their path. */
    private HttpResponse handleDELETE(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if
(path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // true = take the rotation out of service
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Advertises the supported HTTP methods. */
    private HttpResponse handleOPTIONS() {
        EmptyJsonResponse response = new EmptyJsonResponse();
        response.headers().put("Allow", "GET,PUT,POST,DELETE,OPTIONS");
        return response;
    }

    /** Lists all tenants with their full content (recursive listing). */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            toSlime(tenantArray.addObject(), tenant, request, true);
        return new SlimeJsonResponse(slime);
    }

    /** API root: either a recursive tenant listing or a plain resource index, depending on the request. */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request)
? recursiveRoot(request)
                : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property");
    }

    /**
     * Returns the current (or overridden) user's id, their tenants, and whether
     * a user tenant already exists for them.
     */
    private HttpResponse authenticatedUser(HttpRequest request) {
        String userIdString = request.getProperty("userOverride");
        if (userIdString == null)
            userIdString = getUserId(request)
                    .map(UserId::id)
                    .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
        UserId userId = new UserId(userIdString);
        List<Tenant> tenants = controller.tenants().asList(userId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userId.id());
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant instanceof UserTenant &&
                                                                             ((UserTenant) tenant).is(userId.id())));
        return new SlimeJsonResponse(slime);
    }

    /** Lists all tenants in short form. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application which has one. */
    private HttpResponse tenantPipelines() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor pipelinesArray = response.setArray("tenantPipelines");
        for (Application application : controller.applications().asList()) {
            if ( !
application.deploymentJobs().projectId().isPresent()) continue; // skip applications without a pipeline
            Cursor pipelineObject = pipelinesArray.addObject();
            pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong()));
            pipelineObject.setString("tenant", application.id().tenant().value());
            pipelineObject.setString("application", application.id().application().value());
            pipelineObject.setString("instance", application.id().instance().value());
        }
        response.setArray("brokenTenantPipelines"); // always empty; kept for response compatibility
        return new SlimeJsonResponse(slime);
    }

    /** Lists the Athenz domains matching the optional "prefix" request property. */
    private HttpResponse athenzDomains(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("data");
        for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) {
            array.addString(athenzDomain.getName());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Lists all known properties with their ids. */
    private HttpResponse properties() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("properties");
        for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
            Cursor propertyObject = array.addObject();
            propertyObject.setString("propertyid", entry.getKey().id());
            propertyObject.setString("property", entry.getValue().id());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the tenant with the given name, or 404 if it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().tenant(TenantName.from(tenantName))
                         .map(tenant -> tenant(tenant, request, true))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Serializes the given tenant, optionally with its application list. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request, listApplications);
        return new SlimeJsonResponse(slime);
    }

    /** Lists all applications of the given tenant. */
    private HttpResponse applications(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array =
slime.setArray();
        for (Application application : controller.applications().asList(tenant))
            toSlime(application, array.addObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the "default" instance of the named application, or 404 if it does not exist. */
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
        Application application =
                controller.applications().get(applicationId)
                          .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
        Slime slime = new Slime();
        toSlime(slime.setObject(), application, request);
        return new SlimeJsonResponse(slime);
    }

    /** Fetches logs for a deployment from the config server, forwarding the raw query parameters. */
    private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, String query) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = ZoneId.from(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        HashMap<String, String> queryParameters = getParameters(query);
        Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters);
        Slime slime = new Slime();
        Cursor object = slime.setObject();
        if (response.isPresent()) {
            response.get().logs().entrySet().stream().forEach(entry -> object.setString(entry.getKey(), entry.getValue()));
        }
        return new SlimeJsonResponse(slime);
    }

    /** Force-triggers the given job for the application, reporting which jobs were actually triggered. */
    private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
        String triggered = controller.applications().deploymentTrigger()
                                     .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName())
                                     .stream().map(JobType::jobName).collect(joining(", "));
        return new MessageResponse(triggered.isEmpty() ?
"Job " + type.jobName() + " for " + id + " not triggered"
                                   : "Triggered " + triggered + " for " + id);
    }

    /** Pauses the given job for this application for the maximum allowed pause period. */
    private HttpResponse pause(ApplicationId id, JobType type) {
        Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
        controller.applications().deploymentTrigger().pauseJob(id, type, until);
        return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
    }

    /**
     * Parses a URL query string ("a=b&c=d") into a key-value map.
     * Fixes: a null query (request URI without a query part) and a parameter without '='
     * (e.g. "?verbose") no longer throw; a value containing '=' is no longer truncated
     * at the second '=' (split limit 2 keeps the remainder intact).
     */
    private HashMap<String, String> getParameters(String query) {
        HashMap<String, String> keyValPair = new HashMap<>();
        if (query == null) return keyValPair;
        Arrays.stream(query.split("&")).forEach(pair -> {
            String[] splitPair = pair.split("=", 2);
            keyValPair.put(splitPair[0], splitPair.length > 1 ? splitPair[1] : "");
        });
        return keyValPair;
    }

    /**
     * Writes the application-level view: identity, job listing url, source of the last component
     * success, project id, current and outstanding changes, and the deployment job statuses
     * (continued on the following lines).
     */
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("application", application.id().application().value());
        object.setString("instance", application.id().instance().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + application.id().tenant().value() +
                                                 "/application/" + application.id().application().value() +
                                                 "/instance/" + application.id().instance().value() + "/job/",
                                                 request.getUri()).toString());
        application.deploymentJobs().statusOf(JobType.component)
                   .flatMap(status -> status.lastSuccess())
                   .map(run -> run.application().source())
                   .ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
        application.deploymentJobs().projectId()
                   .ifPresent(id -> object.setLong("projectId", id));
        if (application.change().isPresent()) {
            toSlime(object.setObject("deploying"), application.change());
        }
        if (application.outstandingChange().isPresent()) {
            toSlime(object.setObject("outstandingChange"), application.outstandingChange());
        }
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(application.deploymentSpec())
                                              .sortedJobs(application.deploymentJobs().jobStatus().values());
        object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
Cursor deploymentsArray = object.setArray("deploymentJobs");
        for (JobStatus job : jobStatus) {
            Cursor jobObject = deploymentsArray.addObject();
            jobObject.setString("type", job.type().jobName());
            jobObject.setBool("success", job.isSuccess());
            job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
            job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
            job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
            job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
        }
        // Change blockers from the deployment spec (block windows for versions/revisions)
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        });
        object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
        Cursor globalRotationsArray = object.setArray("globalRotations");
        application.globalDnsName(controller.system()).ifPresent(rotation -> {
            globalRotationsArray.addString(rotation.url().toString());
            globalRotationsArray.addString(rotation.secureUrl().toString());
            globalRotationsArray.addString(rotation.oathUrl().toString());
            object.setString("rotationId", application.rotation().get().asString());
        });
        List<Deployment> deployments = controller.applications().deploymentTrigger()
                                                 .steps(application.deploymentSpec())
                                                 .sortedDeployments(application.deployments().values());
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", application.id().instance().value());
            // Rotation status is only relevant for rotated prod deployments
            if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
                toSlime(application.rotationStatus(deployment), deploymentObject);
            }
            if (recurseOverDeployments(request)) // List full deployment information when recursive.
                toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
            else
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value() +
                                                           "/instance/" + application.id().instance().value(),
                                                           request.getUri()).toString());
        }
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId",
issueId.value()));
    }

    /** Returns full information about a single deployment, or 404 if the application is not deployed there. */
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application =
                controller.applications().get(id)
                          .orElseThrow(() -> new NotExistsException(id + " not found"));
        DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region));
        Deployment deployment = application.deployments().get(deploymentId.zoneId());
        if (deployment == null)
            throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());
        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    /** Writes a change as its platform version and/or (known) application revision. */
    private void toSlime(Cursor object, Change change) {
        change.platform().ifPresent(version -> object.setString("version", version.toString()));
        change.application()
              .filter(version -> !version.isUnknown())
              .ifPresent(version -> toSlime(version, object.setObject("revision")));
    }

    /**
     * Writes the full deployment view: service and node urls, monitoring links, versions,
     * timestamps, activity, cost and metrics.
     */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        Cursor serviceUrlArray = response.setArray("serviceUrls");
        controller.applications().getDeploymentEndpoints(deploymentId)
                  .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
        response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" +
                                             deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" +
                                             deploymentId.applicationId().tenant() + "." +
                                             deploymentId.applicationId().application() + "."
                                             + deploymentId.applicationId().instance(),
                                             request.getUri()).toString());
        controller.zoneRegistry().getLogServerUri(deploymentId)
                  .ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString()));
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.applicationVersion().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
        controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
                  .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.applicationVersion().source(), response);
        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        // Cost
        DeploymentCost appCost = deployment.calculateCost();
        Cursor costObject = response.setObject("cost");
        toSlime(appCost, costObject);
        // Metrics
        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (!applicationVersion.isUnknown()) { object.setString("hash", applicationVersion.id()); sourceRevisionToSlime(applicationVersion.source(), object.setObject("source")); } } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! revision.isPresent()) return; object.setString("gitRepository", revision.get().repository()); object.setString("gitBranch", revision.get().branch()); object.setString("gitCommit", revision.get().commit()); } private void toSlime(RotationStatus status, Cursor object) { Cursor bcpStatus = object.setObject("bcpStatus"); bcpStatus.setString("rotationStatus", status.name().toUpperCase()); } private URI monitoringSystemUri(DeploymentId deploymentId) { return controller.zoneRegistry().getMonitoringSystemUri(deploymentId); } private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); ZoneId zone = ZoneId.from(environment, region); Deployment deployment = application.deployments().get(zone); if (deployment == null) { throw new NotExistsException(application + " has no deployment in " + zone); } Inspector requestData = toSlime(request.getData()).get(); String reason = mandatory("reason", requestData).asString(); String agent = getUserPrincipal(request).getIdentity().getFullName(); long timestamp = controller.clock().instant().getEpochSecond(); EndpointStatus.Status status = inService ? 
EndpointStatus.Status.in : EndpointStatus.Status.out; EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp); controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()), endpointStatus); return new MessageResponse(String.format("Successfully set %s in %s.%s %s service", application.id().toShortString(), deployment.zone().environment().value(), deployment.zone().region().value(), inService ? "in" : "out of")); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId); for (RoutingEndpoint endpoint : status.keySet()) { EndpointStatus currentStatus = status.get(endpoint); array.addString(endpoint.upstreamName()); Cursor statusObject = array.addObject(); statusObject.setString("status", currentStatus.getStatus().name()); statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); statusObject.setString("agent", currentStatus.getAgent() == null ? 
"" : currentStatus.getAgent()); statusObject.setLong("timestamp", currentStatus.getEpoch()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().require(applicationId); ZoneId zone = ZoneId.from(environment, region); if (!application.rotation().isPresent()) { throw new NotExistsException("global rotation does not exist for " + application); } Deployment deployment = application.deployments().get(zone); if (deployment == null) { throw new NotExistsException(application + " has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(application.rotationStatus(deployment), response); return new SlimeJsonResponse(slime); } private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), 
controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<UserId> user = getUserId(request); if ( ! user.isPresent() ) throw new ForbiddenException("Not authenticated or not an user."); String username = UserTenant.normalizeUser(user.get().id()); try { controller.tenants().create(UserTenant.create(username)); return new MessageResponse("Created user '" + username + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<AthenzTenant> tenant = controller.tenants().athenzTenant(TenantName.from(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); Inspector requestData = toSlime(request.getData()).get(); OktaAccessToken token = requireOktaAccessToken(request, "Could not update " + tenantName); controller.tenants().lockOrThrow(tenant.get().name(), lockedTenant -> { lockedTenant = lockedTenant.with(new Property(mandatory("property", requestData).asString())); lockedTenant = controller.tenants().withDomain( lockedTenant, new AthenzDomain(mandatory("athensDomain", requestData).asString()), token ); Optional<PropertyId> propertyId = optional("propertyId", requestData).map(PropertyId::new); if (propertyId.isPresent()) { lockedTenant = lockedTenant.with(propertyId.get()); } controller.tenants().store(lockedTenant); }); return tenant(controller.tenants().requireAthenzTenant(tenant.get().name()), request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { Inspector requestData = toSlime(request.getData()).get(); AthenzTenant tenant = AthenzTenant.create(TenantName.from(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotAthenzDomainAdmin(tenant.domain(), request); controller.tenants().create(tenant, requireOktaAccessToken(request, "Could not create " + tenantName)); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), getOktaAccessToken(request)); } catch (ZmsClientException e) { if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, 
slime.setObject(), request); return new SlimeJsonResponse(slime); } /** * Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9", * optionally pinning to that version if. */ private HttpResponse deployPlatform(String tenantName, String applicationName, String versionString, boolean pin) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); StringBuilder response = new StringBuilder(); controller.applications().lockOrThrow(id, application -> { Version version = Version.fromString(versionString); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPin(); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered " + change + " for " + id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. 
*/ private HttpResponse deployApplication(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); StringBuilder response = new StringBuilder(); controller.applications().lockOrThrow(id, application -> { Change change = Change.of(application.get().deploymentJobs().statusOf(JobType.component).get().lastSuccess().get().application()); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered " + change + " for " + id); }); return new MessageResponse(response.toString()); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); StringBuilder response = new StringBuilder(); controller.applications().lockOrThrow(id, application -> { Change change = application.get().change(); if ( ! change.isPresent() && ! 
change.isPinned()) { response.append("No deployment in progress for " + application + " at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '" + change + "' to '" + controller.applications().require(id).change() + "' for " + application); }); return new MessageResponse(response.toString()); } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new); controller.applications().restart(deploymentId, hostname); return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(tenantName); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); if (tenant.get() instanceof AthenzTenant) { controller.tenants().deleteTenant((AthenzTenant) tenant.get(), requireOktaAccessToken(request, "Could not delete " + tenantName)); } else if (tenant.get() instanceof UserTenant) { controller.tenants().deleteTenant((UserTenant) tenant.get()); } else { throw new IllegalArgumentException("Unknown tenant type:" + tenant.get().getClass().getSimpleName() + ", for " + tenant.get()); } return tenant(tenant.get(), request, false); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); controller.applications().deleteApplication(id, getOktaAccessToken(request)); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); controller.applications().deactivate(application.id(), ZoneId.from(environment, region)); return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) { try { DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get()); if ( report.jobType() == JobType.component && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally()) throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " + "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " + "to the old pipeline, please file a ticket at yo/vespa-support and request this."); controller.applications().deploymentTrigger().notifyOfCompletion(report); return new MessageResponse("ok"); } catch (IllegalStateException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } } /** Returns a URI which points to an overview badge for the given application. */ private HttpResponse badge(String tenant, String application, String instance) { URI location = controller.jobController().overviewBadge(ApplicationId.from(tenant, application, instance)); return redirect(location); } /** Returns a URI which points to a history badge for the given application and job type. */ private HttpResponse badge(String tenant, String application, String instance, String jobName, String historyLength) { URI location = controller.jobController().historicBadge(ApplicationId.from(tenant, application, instance), JobType.fromJobName(jobName), historyLength == null ? 
5 : Math.min(32, Math.max(0, Integer.parseInt(historyLength)))); return redirect(location); } private static HttpResponse redirect(URI location) { HttpResponse httpResponse = new HttpResponse(Response.Status.FOUND) { @Override public void render(OutputStream outputStream) { } }; httpResponse.headers().add("Location", location.toString()); return httpResponse; } private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) { Optional<DeploymentJobs.JobError> jobError = Optional.empty(); if (report.field("jobError").valid()) { jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString())); } ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString()); JobType type = JobType.fromJobName(report.field("jobName").asString()); long buildNumber = report.field("buildNumber").asLong(); if (type == JobType.component) return DeploymentJobs.JobReport.ofComponent(id, report.field("projectId").asLong(), buildNumber, jobError, toSourceRevision(report.field("sourceRevision"))); else return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || !object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().tenant(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) { object.setString("tenant", tenant.name().value()); object.setString("type", tentantType(tenant)); if (tenant instanceof 
AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); } Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(tenant.name())) { if (application.id().instance().isDefault()) { if (recurseOverApplications(request)) toSlime(applicationArray.addObject(), application, request); else toSlime(application, applicationArray.addObject(), request); } } } if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); } } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = object.setObject("metaData"); metaData.setString("type", tentantType(tenant)); if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, 
null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setLong("id", jobRun.id()); object.setString("version", jobRun.platform().toFullString()); if (!jobRun.application().isUnknown()) toSlime(jobRun.application(), object.setObject("revision")); object.setString("reason", jobRun.reason()); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest request) { AthenzIdentity identity = getUserPrincipal(request).getIdentity(); boolean isDomainAdmin = zmsClient.isDomainAdmin(identity, tenantDomain); if ( ! 
isDomainAdmin) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athenz domain '%s'", identity.getFullName(), tenantDomain.getName())); } } private static Optional<UserId> getUserId(HttpRequest request) { return Optional.of(getUserPrincipal(request)) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast) .map(AthenzUser::getName) .map(UserId::new); } private static AthenzPrincipal getUserPrincipal(HttpRequest request) { Principal principal = request.getJDiscRequest().getUserPrincipal(); if (principal == null) throw new InternalServerErrorException("Expected a user principal"); if (!(principal instanceof AthenzPrincipal)) throw new InternalServerErrorException( String.format("Expected principal of type %s, got %s", AthenzPrincipal.class.getSimpleName(), principal.getClass().getName())); return (AthenzPrincipal) principal; } private Inspector mandatory(String key, Inspector object) { if ( ! object.field(key).valid()) throw new IllegalArgumentException("'" + key + "' is missing"); return object.field(key); } private Optional<String> optional(String key, Inspector object) { return SlimeUtils.optionalString(object.field(key)); } private static String path(Object... 
elements) { return Joiner.on("/").join(elements); } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); 
refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } public static void toSlime(DeploymentCost deploymentCost, Cursor object) { object.setLong("tco", (long)deploymentCost.getTco()); object.setLong("waste", (long)deploymentCost.getWaste()); object.setDouble("utilization", deploymentCost.getUtilization()); Cursor clustersObject = object.setObject("cluster"); for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet()) toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey())); } private static void toSlime(ClusterCost clusterCost, Cursor object) { object.setLong("count", clusterCost.getClusterInfo().getHostnames().size()); object.setString("resource", getResourceName(clusterCost.getResultUtilization())); object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization()); object.setLong("tco", (int)clusterCost.getTco()); object.setLong("waste", (int)clusterCost.getWaste()); object.setString("flavor", clusterCost.getClusterInfo().getFlavor()); object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost()); object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU()); object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem()); object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk()); object.setString("type", clusterCost.getClusterInfo().getClusterType().name()); Cursor utilObject = object.setObject("util"); utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu()); utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory()); utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk()); utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy()); Cursor usageObject = object.setObject("usage"); 
usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu()); usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory()); usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk()); usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy()); Cursor hostnamesArray = object.setArray("hostnames"); for (String hostname : clusterCost.getClusterInfo().getHostnames()) hostnamesArray.addString(hostname); } private static String getResourceName(ClusterUtilization utilization) { String name = "cpu"; double max = utilization.getMaxUtilization(); if (utilization.getMemory() == max) { name = "mem"; } else if (utilization.getDisk() == max) { name = "disk"; } else if (utilization.getDiskBusy() == max) { name = "diskbusy"; } return name; } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static String tentantType(Tenant tenant) { if (tenant instanceof AthenzTenant) { return "ATHENS"; } else if (tenant instanceof UserTenant) { return "USER"; } throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName()); } private static OktaAccessToken requireOktaAccessToken(HttpRequest request, String message) { return getOktaAccessToken(request) .orElseThrow(() -> new IllegalArgumentException(message + ": No Okta Access Token provided")); } private static Optional<OktaAccessToken> getOktaAccessToken(HttpRequest request) { return Optional.ofNullable(request.getJDiscRequest().context().get("okta.access-token")) .map(attribute -> new 
OktaAccessToken((String) attribute)); } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private static JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype")); } private static RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = new MultipartParser().parse(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); SourceRevision sourceRevision = toSourceRevision(submitOptions); String authorEmail = submitOptions.field("authorEmail").asString(); long projectId = Math.max(1, submitOptions.field("projectId").asLong()); byte[] applicationZip = dataParts.get(EnvironmentResource.APPLICATION_ZIP); controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant), new ApplicationPackage(applicationZip), Optional.of(getUserPrincipal(request).getIdentity())); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application, sourceRevision, authorEmail, projectId, applicationZip, dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP)); } }
This must be implemented. I see we're missing tests of RawFlag serialization/deserialization; those should be added. It would be great to have tests where a Flag plus a JSON string is deserialized to the expected values, and then serialized back to the same JSON. Could you also add ListFlag to InMemoryFlagSource?
public RawFlag serialize(List<T> value) { return null; }
return null;
public RawFlag serialize(List<T> value) { return JsonNodeRawFlag.fromJacksonClass(value); }
class JacksonArraySerializer<T> implements FlagSerializer<List<T>> { @SuppressWarnings("unchecked") @Override public List<T> deserialize(RawFlag rawFlag) { return (List<T>) JsonNodeRawFlag.fromJsonNode(rawFlag.asJsonNode()).toJacksonClass(List.class); } @Override }
class JacksonArraySerializer<T> implements FlagSerializer<List<T>> { @SuppressWarnings("unchecked") @Override public List<T> deserialize(RawFlag rawFlag) { return (List<T>) JsonNodeRawFlag.fromJsonNode(rawFlag.asJsonNode()).toJacksonClass(List.class); } @Override }
Oops! Implemented. Added basic serialization/deserialization to `FlagsTest` Added `ListFlag` to `InMemoryFlagSource`
public RawFlag serialize(List<T> value) { return null; }
return null;
public RawFlag serialize(List<T> value) { return JsonNodeRawFlag.fromJacksonClass(value); }
class JacksonArraySerializer<T> implements FlagSerializer<List<T>> { @SuppressWarnings("unchecked") @Override public List<T> deserialize(RawFlag rawFlag) { return (List<T>) JsonNodeRawFlag.fromJsonNode(rawFlag.asJsonNode()).toJacksonClass(List.class); } @Override }
class JacksonArraySerializer<T> implements FlagSerializer<List<T>> { @SuppressWarnings("unchecked") @Override public List<T> deserialize(RawFlag rawFlag) { return (List<T>) JsonNodeRawFlag.fromJsonNode(rawFlag.asJsonNode()).toJacksonClass(List.class); } @Override }
Do you mind changing this properties() method to return a TestProperties? If necessary, set it up with the exact same values to avoid impacting any tests. This would avoid needing to touch this file when Properties are changed in the future.
private ModelContext createMockModelContext(String hosts, String services, HostProvisioner provisionerToOverride) { return new MockModelContext() { @Override public ApplicationPackage applicationPackage() { return new MockApplicationPackage.Builder().withHosts(hosts).withServices(services).build(); } @Override public Optional<HostProvisioner> hostProvisioner() { return Optional.of(provisionerToOverride); } @Override public Properties properties() { return new Properties() { @Override public boolean multitenant() { return true; } @Override public boolean hostedVespa() { return true; } @Override public Zone zone() { return Zone.defaultZone(); } @Override public Set<Rotation> rotations() { return new HashSet<>(); } @Override public ApplicationId applicationId() { return ApplicationId.from(TenantName.from("hosted-vespa"), ApplicationName.from("routing"), InstanceName.defaultName()); } @Override public List<ConfigServerSpec> configServerSpecs() { return Collections.emptyList(); } @Override public HostName loadBalancerName() { return null; } @Override public URI ztsUrl() { return null; } @Override public String athenzDnsSuffix() { return null; } @Override public boolean isBootstrap() { return false; } @Override public boolean isFirstTimeDeployment() { return false; } @Override public boolean useDedicatedNodeForLogserver() { return false; } @Override public boolean useFdispatchByDefault() { return true; } }; } }; }
public boolean useFdispatchByDefault() { return true; }
private ModelContext createMockModelContext(String hosts, String services, HostProvisioner provisionerToOverride) { return new MockModelContext() { @Override public ApplicationPackage applicationPackage() { return new MockApplicationPackage.Builder().withHosts(hosts).withServices(services).build(); } @Override public Optional<HostProvisioner> hostProvisioner() { return Optional.of(provisionerToOverride); } @Override public Properties properties() { return new TestProperties(); } }; }
class VespaModelFactoryTest { private ModelContext testModelContext; @Before public void setupContext() { testModelContext = new MockModelContext(); } @Test public void testThatFactoryCanBuildModel() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); Model model = modelFactory.createModel(testModelContext); assertNotNull(model); assertTrue(model instanceof VespaModel); } @Test(expected = IllegalArgumentException.class) public void testThatFactoryModelValidationFailsWithIllegalArgumentException() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); modelFactory.createAndValidateModel(new MockModelContext(createApplicationPackageThatFailsWhenValidating()), new ValidationParameters()); } @Test(expected = RuntimeException.class) public void testThatFactoryModelValidationFails() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); modelFactory.createAndValidateModel(testModelContext, new ValidationParameters()); } @Test public void testThatFactoryModelValidationCanBeIgnored() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); ModelCreateResult createResult = modelFactory.createAndValidateModel( new MockModelContext(createApplicationPackageThatFailsWhenValidating()), new ValidationParameters(ValidationParameters.IgnoreValidationErrors.TRUE)); assertNotNull(createResult.getModel()); assertNotNull(createResult.getConfigChangeActions()); assertTrue(createResult.getConfigChangeActions().isEmpty()); } @Test public void hostedVespaZoneApplicationAllocatesNodesFromNodeRepo() { String hostName = "test-host-name"; String routingClusterName = "routing-cluster"; String hosts = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<hosts>\n" + " <host name='" + hostName + "'>\n" + " <alias>proxy1</alias>\n" + " </host>\n" + "</hosts>"; String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services version='1.0' 
xmlns:deploy='vespa'>\n" + " <admin version='2.0'>\n" + " <adminserver hostalias='proxy1' />\n" + " </admin>" + " <jdisc id='" + routingClusterName + "' version='1.0'>\n" + " <nodes type='proxy'/>\n" + " </jdisc>\n" + "</services>"; HostProvisioner provisionerToOverride = new HostProvisioner() { @Override public HostSpec allocateHost(String alias) { return new HostSpec(hostName, Collections.emptyList(), ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.admin, new ClusterSpec.Id(routingClusterName), ClusterSpec.Group.from(0), Version.fromString("6.42"), false), 0)); } @Override public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, int groups, ProvisionLogger logger) { return Collections.singletonList(new HostSpec(hostName, Collections.emptyList(), ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.container, new ClusterSpec.Id(routingClusterName), ClusterSpec.Group.from(0), Version.fromString("6.42"), false), 0))); } }; ModelContext modelContext = createMockModelContext(hosts, services, provisionerToOverride); Model model = new VespaModelFactory(new NullConfigModelRegistry()).createModel(modelContext); List<HostInfo> allocatedHosts = new ArrayList<>(model.getHosts()); assertThat(allocatedHosts.size(), is(1)); HostInfo hostInfo = allocatedHosts.get(0); assertThat(hostInfo.getHostname(), is(hostName)); assertTrue("Routing service should run on host " + hostName, hostInfo.getServices().stream() .map(ServiceInfo::getConfigId) .anyMatch(configId -> configId.contains(routingClusterName))); } ApplicationPackage createApplicationPackageThatFailsWhenValidating() { return new MockApplicationPackage.Builder().withEmptyHosts().withEmptyServices().failOnValidateXml().build(); } }
class VespaModelFactoryTest { private ModelContext testModelContext; @Before public void setupContext() { testModelContext = new MockModelContext(); } @Test public void testThatFactoryCanBuildModel() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); Model model = modelFactory.createModel(testModelContext); assertNotNull(model); assertTrue(model instanceof VespaModel); } @Test(expected = IllegalArgumentException.class) public void testThatFactoryModelValidationFailsWithIllegalArgumentException() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); modelFactory.createAndValidateModel(new MockModelContext(createApplicationPackageThatFailsWhenValidating()), new ValidationParameters()); } @Test(expected = RuntimeException.class) public void testThatFactoryModelValidationFails() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); modelFactory.createAndValidateModel(testModelContext, new ValidationParameters()); } @Test public void testThatFactoryModelValidationCanBeIgnored() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); ModelCreateResult createResult = modelFactory.createAndValidateModel( new MockModelContext(createApplicationPackageThatFailsWhenValidating()), new ValidationParameters(ValidationParameters.IgnoreValidationErrors.TRUE)); assertNotNull(createResult.getModel()); assertNotNull(createResult.getConfigChangeActions()); assertTrue(createResult.getConfigChangeActions().isEmpty()); } @Test public void hostedVespaZoneApplicationAllocatesNodesFromNodeRepo() { String hostName = "test-host-name"; String routingClusterName = "routing-cluster"; String hosts = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<hosts>\n" + " <host name='" + hostName + "'>\n" + " <alias>proxy1</alias>\n" + " </host>\n" + "</hosts>"; String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services version='1.0' 
xmlns:deploy='vespa'>\n" + " <admin version='2.0'>\n" + " <adminserver hostalias='proxy1' />\n" + " </admin>" + " <jdisc id='" + routingClusterName + "' version='1.0'>\n" + " <nodes type='proxy'/>\n" + " </jdisc>\n" + "</services>"; HostProvisioner provisionerToOverride = new HostProvisioner() { @Override public HostSpec allocateHost(String alias) { return new HostSpec(hostName, Collections.emptyList(), ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.admin, new ClusterSpec.Id(routingClusterName), ClusterSpec.Group.from(0), Version.fromString("6.42"), false), 0)); } @Override public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, int groups, ProvisionLogger logger) { return Collections.singletonList(new HostSpec(hostName, Collections.emptyList(), ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.container, new ClusterSpec.Id(routingClusterName), ClusterSpec.Group.from(0), Version.fromString("6.42"), false), 0))); } }; ModelContext modelContext = createMockModelContext(hosts, services, provisionerToOverride); Model model = new VespaModelFactory(new NullConfigModelRegistry()).createModel(modelContext); List<HostInfo> allocatedHosts = new ArrayList<>(model.getHosts()); assertThat(allocatedHosts.size(), is(1)); HostInfo hostInfo = allocatedHosts.get(0); assertThat(hostInfo.getHostname(), is(hostName)); assertTrue("Routing service should run on host " + hostName, hostInfo.getServices().stream() .map(ServiceInfo::getConfigId) .anyMatch(configId -> configId.contains(routingClusterName))); } ApplicationPackage createApplicationPackageThatFailsWhenValidating() { return new MockApplicationPackage.Builder().withEmptyHosts().withEmptyServices().failOnValidateXml().build(); } }
Will do.
private ModelContext createMockModelContext(String hosts, String services, HostProvisioner provisionerToOverride) { return new MockModelContext() { @Override public ApplicationPackage applicationPackage() { return new MockApplicationPackage.Builder().withHosts(hosts).withServices(services).build(); } @Override public Optional<HostProvisioner> hostProvisioner() { return Optional.of(provisionerToOverride); } @Override public Properties properties() { return new Properties() { @Override public boolean multitenant() { return true; } @Override public boolean hostedVespa() { return true; } @Override public Zone zone() { return Zone.defaultZone(); } @Override public Set<Rotation> rotations() { return new HashSet<>(); } @Override public ApplicationId applicationId() { return ApplicationId.from(TenantName.from("hosted-vespa"), ApplicationName.from("routing"), InstanceName.defaultName()); } @Override public List<ConfigServerSpec> configServerSpecs() { return Collections.emptyList(); } @Override public HostName loadBalancerName() { return null; } @Override public URI ztsUrl() { return null; } @Override public String athenzDnsSuffix() { return null; } @Override public boolean isBootstrap() { return false; } @Override public boolean isFirstTimeDeployment() { return false; } @Override public boolean useDedicatedNodeForLogserver() { return false; } @Override public boolean useFdispatchByDefault() { return true; } }; } }; }
public boolean useFdispatchByDefault() { return true; }
private ModelContext createMockModelContext(String hosts, String services, HostProvisioner provisionerToOverride) { return new MockModelContext() { @Override public ApplicationPackage applicationPackage() { return new MockApplicationPackage.Builder().withHosts(hosts).withServices(services).build(); } @Override public Optional<HostProvisioner> hostProvisioner() { return Optional.of(provisionerToOverride); } @Override public Properties properties() { return new TestProperties(); } }; }
class VespaModelFactoryTest { private ModelContext testModelContext; @Before public void setupContext() { testModelContext = new MockModelContext(); } @Test public void testThatFactoryCanBuildModel() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); Model model = modelFactory.createModel(testModelContext); assertNotNull(model); assertTrue(model instanceof VespaModel); } @Test(expected = IllegalArgumentException.class) public void testThatFactoryModelValidationFailsWithIllegalArgumentException() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); modelFactory.createAndValidateModel(new MockModelContext(createApplicationPackageThatFailsWhenValidating()), new ValidationParameters()); } @Test(expected = RuntimeException.class) public void testThatFactoryModelValidationFails() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); modelFactory.createAndValidateModel(testModelContext, new ValidationParameters()); } @Test public void testThatFactoryModelValidationCanBeIgnored() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); ModelCreateResult createResult = modelFactory.createAndValidateModel( new MockModelContext(createApplicationPackageThatFailsWhenValidating()), new ValidationParameters(ValidationParameters.IgnoreValidationErrors.TRUE)); assertNotNull(createResult.getModel()); assertNotNull(createResult.getConfigChangeActions()); assertTrue(createResult.getConfigChangeActions().isEmpty()); } @Test public void hostedVespaZoneApplicationAllocatesNodesFromNodeRepo() { String hostName = "test-host-name"; String routingClusterName = "routing-cluster"; String hosts = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<hosts>\n" + " <host name='" + hostName + "'>\n" + " <alias>proxy1</alias>\n" + " </host>\n" + "</hosts>"; String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services version='1.0' 
xmlns:deploy='vespa'>\n" + " <admin version='2.0'>\n" + " <adminserver hostalias='proxy1' />\n" + " </admin>" + " <jdisc id='" + routingClusterName + "' version='1.0'>\n" + " <nodes type='proxy'/>\n" + " </jdisc>\n" + "</services>"; HostProvisioner provisionerToOverride = new HostProvisioner() { @Override public HostSpec allocateHost(String alias) { return new HostSpec(hostName, Collections.emptyList(), ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.admin, new ClusterSpec.Id(routingClusterName), ClusterSpec.Group.from(0), Version.fromString("6.42"), false), 0)); } @Override public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, int groups, ProvisionLogger logger) { return Collections.singletonList(new HostSpec(hostName, Collections.emptyList(), ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.container, new ClusterSpec.Id(routingClusterName), ClusterSpec.Group.from(0), Version.fromString("6.42"), false), 0))); } }; ModelContext modelContext = createMockModelContext(hosts, services, provisionerToOverride); Model model = new VespaModelFactory(new NullConfigModelRegistry()).createModel(modelContext); List<HostInfo> allocatedHosts = new ArrayList<>(model.getHosts()); assertThat(allocatedHosts.size(), is(1)); HostInfo hostInfo = allocatedHosts.get(0); assertThat(hostInfo.getHostname(), is(hostName)); assertTrue("Routing service should run on host " + hostName, hostInfo.getServices().stream() .map(ServiceInfo::getConfigId) .anyMatch(configId -> configId.contains(routingClusterName))); } ApplicationPackage createApplicationPackageThatFailsWhenValidating() { return new MockApplicationPackage.Builder().withEmptyHosts().withEmptyServices().failOnValidateXml().build(); } }
class VespaModelFactoryTest { private ModelContext testModelContext; @Before public void setupContext() { testModelContext = new MockModelContext(); } @Test public void testThatFactoryCanBuildModel() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); Model model = modelFactory.createModel(testModelContext); assertNotNull(model); assertTrue(model instanceof VespaModel); } @Test(expected = IllegalArgumentException.class) public void testThatFactoryModelValidationFailsWithIllegalArgumentException() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); modelFactory.createAndValidateModel(new MockModelContext(createApplicationPackageThatFailsWhenValidating()), new ValidationParameters()); } @Test(expected = RuntimeException.class) public void testThatFactoryModelValidationFails() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); modelFactory.createAndValidateModel(testModelContext, new ValidationParameters()); } @Test public void testThatFactoryModelValidationCanBeIgnored() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); ModelCreateResult createResult = modelFactory.createAndValidateModel( new MockModelContext(createApplicationPackageThatFailsWhenValidating()), new ValidationParameters(ValidationParameters.IgnoreValidationErrors.TRUE)); assertNotNull(createResult.getModel()); assertNotNull(createResult.getConfigChangeActions()); assertTrue(createResult.getConfigChangeActions().isEmpty()); } @Test public void hostedVespaZoneApplicationAllocatesNodesFromNodeRepo() { String hostName = "test-host-name"; String routingClusterName = "routing-cluster"; String hosts = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<hosts>\n" + " <host name='" + hostName + "'>\n" + " <alias>proxy1</alias>\n" + " </host>\n" + "</hosts>"; String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services version='1.0' 
xmlns:deploy='vespa'>\n" + " <admin version='2.0'>\n" + " <adminserver hostalias='proxy1' />\n" + " </admin>" + " <jdisc id='" + routingClusterName + "' version='1.0'>\n" + " <nodes type='proxy'/>\n" + " </jdisc>\n" + "</services>"; HostProvisioner provisionerToOverride = new HostProvisioner() { @Override public HostSpec allocateHost(String alias) { return new HostSpec(hostName, Collections.emptyList(), ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.admin, new ClusterSpec.Id(routingClusterName), ClusterSpec.Group.from(0), Version.fromString("6.42"), false), 0)); } @Override public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, int groups, ProvisionLogger logger) { return Collections.singletonList(new HostSpec(hostName, Collections.emptyList(), ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.container, new ClusterSpec.Id(routingClusterName), ClusterSpec.Group.from(0), Version.fromString("6.42"), false), 0))); } }; ModelContext modelContext = createMockModelContext(hosts, services, provisionerToOverride); Model model = new VespaModelFactory(new NullConfigModelRegistry()).createModel(modelContext); List<HostInfo> allocatedHosts = new ArrayList<>(model.getHosts()); assertThat(allocatedHosts.size(), is(1)); HostInfo hostInfo = allocatedHosts.get(0); assertThat(hostInfo.getHostname(), is(hostName)); assertTrue("Routing service should run on host " + hostName, hostInfo.getServices().stream() .map(ServiceInfo::getConfigId) .anyMatch(configId -> configId.contains(routingClusterName))); } ApplicationPackage createApplicationPackageThatFailsWhenValidating() { return new MockApplicationPackage.Builder().withEmptyHosts().withEmptyServices().failOnValidateXml().build(); } }
The test did not appear to need different values, hopefully Travis agrees.
private ModelContext createMockModelContext(String hosts, String services, HostProvisioner provisionerToOverride) { return new MockModelContext() { @Override public ApplicationPackage applicationPackage() { return new MockApplicationPackage.Builder().withHosts(hosts).withServices(services).build(); } @Override public Optional<HostProvisioner> hostProvisioner() { return Optional.of(provisionerToOverride); } @Override public Properties properties() { return new Properties() { @Override public boolean multitenant() { return true; } @Override public boolean hostedVespa() { return true; } @Override public Zone zone() { return Zone.defaultZone(); } @Override public Set<Rotation> rotations() { return new HashSet<>(); } @Override public ApplicationId applicationId() { return ApplicationId.from(TenantName.from("hosted-vespa"), ApplicationName.from("routing"), InstanceName.defaultName()); } @Override public List<ConfigServerSpec> configServerSpecs() { return Collections.emptyList(); } @Override public HostName loadBalancerName() { return null; } @Override public URI ztsUrl() { return null; } @Override public String athenzDnsSuffix() { return null; } @Override public boolean isBootstrap() { return false; } @Override public boolean isFirstTimeDeployment() { return false; } @Override public boolean useDedicatedNodeForLogserver() { return false; } @Override public boolean useFdispatchByDefault() { return true; } }; } }; }
public boolean useFdispatchByDefault() { return true; }
private ModelContext createMockModelContext(String hosts, String services, HostProvisioner provisionerToOverride) { return new MockModelContext() { @Override public ApplicationPackage applicationPackage() { return new MockApplicationPackage.Builder().withHosts(hosts).withServices(services).build(); } @Override public Optional<HostProvisioner> hostProvisioner() { return Optional.of(provisionerToOverride); } @Override public Properties properties() { return new TestProperties(); } }; }
class VespaModelFactoryTest { private ModelContext testModelContext; @Before public void setupContext() { testModelContext = new MockModelContext(); } @Test public void testThatFactoryCanBuildModel() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); Model model = modelFactory.createModel(testModelContext); assertNotNull(model); assertTrue(model instanceof VespaModel); } @Test(expected = IllegalArgumentException.class) public void testThatFactoryModelValidationFailsWithIllegalArgumentException() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); modelFactory.createAndValidateModel(new MockModelContext(createApplicationPackageThatFailsWhenValidating()), new ValidationParameters()); } @Test(expected = RuntimeException.class) public void testThatFactoryModelValidationFails() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); modelFactory.createAndValidateModel(testModelContext, new ValidationParameters()); } @Test public void testThatFactoryModelValidationCanBeIgnored() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); ModelCreateResult createResult = modelFactory.createAndValidateModel( new MockModelContext(createApplicationPackageThatFailsWhenValidating()), new ValidationParameters(ValidationParameters.IgnoreValidationErrors.TRUE)); assertNotNull(createResult.getModel()); assertNotNull(createResult.getConfigChangeActions()); assertTrue(createResult.getConfigChangeActions().isEmpty()); } @Test public void hostedVespaZoneApplicationAllocatesNodesFromNodeRepo() { String hostName = "test-host-name"; String routingClusterName = "routing-cluster"; String hosts = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<hosts>\n" + " <host name='" + hostName + "'>\n" + " <alias>proxy1</alias>\n" + " </host>\n" + "</hosts>"; String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services version='1.0' 
xmlns:deploy='vespa'>\n" + " <admin version='2.0'>\n" + " <adminserver hostalias='proxy1' />\n" + " </admin>" + " <jdisc id='" + routingClusterName + "' version='1.0'>\n" + " <nodes type='proxy'/>\n" + " </jdisc>\n" + "</services>"; HostProvisioner provisionerToOverride = new HostProvisioner() { @Override public HostSpec allocateHost(String alias) { return new HostSpec(hostName, Collections.emptyList(), ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.admin, new ClusterSpec.Id(routingClusterName), ClusterSpec.Group.from(0), Version.fromString("6.42"), false), 0)); } @Override public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, int groups, ProvisionLogger logger) { return Collections.singletonList(new HostSpec(hostName, Collections.emptyList(), ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.container, new ClusterSpec.Id(routingClusterName), ClusterSpec.Group.from(0), Version.fromString("6.42"), false), 0))); } }; ModelContext modelContext = createMockModelContext(hosts, services, provisionerToOverride); Model model = new VespaModelFactory(new NullConfigModelRegistry()).createModel(modelContext); List<HostInfo> allocatedHosts = new ArrayList<>(model.getHosts()); assertThat(allocatedHosts.size(), is(1)); HostInfo hostInfo = allocatedHosts.get(0); assertThat(hostInfo.getHostname(), is(hostName)); assertTrue("Routing service should run on host " + hostName, hostInfo.getServices().stream() .map(ServiceInfo::getConfigId) .anyMatch(configId -> configId.contains(routingClusterName))); } ApplicationPackage createApplicationPackageThatFailsWhenValidating() { return new MockApplicationPackage.Builder().withEmptyHosts().withEmptyServices().failOnValidateXml().build(); } }
class VespaModelFactoryTest { private ModelContext testModelContext; @Before public void setupContext() { testModelContext = new MockModelContext(); } @Test public void testThatFactoryCanBuildModel() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); Model model = modelFactory.createModel(testModelContext); assertNotNull(model); assertTrue(model instanceof VespaModel); } @Test(expected = IllegalArgumentException.class) public void testThatFactoryModelValidationFailsWithIllegalArgumentException() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); modelFactory.createAndValidateModel(new MockModelContext(createApplicationPackageThatFailsWhenValidating()), new ValidationParameters()); } @Test(expected = RuntimeException.class) public void testThatFactoryModelValidationFails() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); modelFactory.createAndValidateModel(testModelContext, new ValidationParameters()); } @Test public void testThatFactoryModelValidationCanBeIgnored() { VespaModelFactory modelFactory = new VespaModelFactory(new NullConfigModelRegistry()); ModelCreateResult createResult = modelFactory.createAndValidateModel( new MockModelContext(createApplicationPackageThatFailsWhenValidating()), new ValidationParameters(ValidationParameters.IgnoreValidationErrors.TRUE)); assertNotNull(createResult.getModel()); assertNotNull(createResult.getConfigChangeActions()); assertTrue(createResult.getConfigChangeActions().isEmpty()); } @Test public void hostedVespaZoneApplicationAllocatesNodesFromNodeRepo() { String hostName = "test-host-name"; String routingClusterName = "routing-cluster"; String hosts = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<hosts>\n" + " <host name='" + hostName + "'>\n" + " <alias>proxy1</alias>\n" + " </host>\n" + "</hosts>"; String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + "<services version='1.0' 
xmlns:deploy='vespa'>\n" + " <admin version='2.0'>\n" + " <adminserver hostalias='proxy1' />\n" + " </admin>" + " <jdisc id='" + routingClusterName + "' version='1.0'>\n" + " <nodes type='proxy'/>\n" + " </jdisc>\n" + "</services>"; HostProvisioner provisionerToOverride = new HostProvisioner() { @Override public HostSpec allocateHost(String alias) { return new HostSpec(hostName, Collections.emptyList(), ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.admin, new ClusterSpec.Id(routingClusterName), ClusterSpec.Group.from(0), Version.fromString("6.42"), false), 0)); } @Override public List<HostSpec> prepare(ClusterSpec cluster, Capacity capacity, int groups, ProvisionLogger logger) { return Collections.singletonList(new HostSpec(hostName, Collections.emptyList(), ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.container, new ClusterSpec.Id(routingClusterName), ClusterSpec.Group.from(0), Version.fromString("6.42"), false), 0))); } }; ModelContext modelContext = createMockModelContext(hosts, services, provisionerToOverride); Model model = new VespaModelFactory(new NullConfigModelRegistry()).createModel(modelContext); List<HostInfo> allocatedHosts = new ArrayList<>(model.getHosts()); assertThat(allocatedHosts.size(), is(1)); HostInfo hostInfo = allocatedHosts.get(0); assertThat(hostInfo.getHostname(), is(hostName)); assertTrue("Routing service should run on host " + hostName, hostInfo.getServices().stream() .map(ServiceInfo::getConfigId) .anyMatch(configId -> configId.contains(routingClusterName))); } ApplicationPackage createApplicationPackageThatFailsWhenValidating() { return new MockApplicationPackage.Builder().withEmptyHosts().withEmptyServices().failOnValidateXml().build(); } }
'adjust' should only be called when re-using inserters. For one-shot use I suggest adding a constructor to it.
public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } var inserter = new ArrayInserter(); inserter.adjust(cursor); injectValue(inserter, inspector, guard); }
inserter.adjust(cursor);
public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ArrayInserter(cursor), inspector, guard); }
class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override @Override public void field(String name, Inspector inspector) { if (inspector == guard) { return; } var inserter = new ObjectInserter(); inserter.adjust(cursor, name); injectValue(inserter, inspector, guard); } }
class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override @Override public void field(String name, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ObjectInserter(cursor, name), inspector, guard); } }
'adjust' should only be called when re-using inserters. For one-shot use I suggest adding a constructor to it.
public void field(String name, Inspector inspector) { if (inspector == guard) { return; } var inserter = new ObjectInserter(); inserter.adjust(cursor, name); injectValue(inserter, inspector, guard); }
inserter.adjust(cursor, name);
public void field(String name, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ObjectInserter(cursor, name), inspector, guard); }
class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } var inserter = new ArrayInserter(); inserter.adjust(cursor); injectValue(inserter, inspector, guard); } @Override }
class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ArrayInserter(cursor), inspector, guard); } @Override }
And update the checksum as the same logic when loading tasks
public long saveTasks(DataOutputStream dos, long checksum) throws IOException { write(dos); return checksum; }
return checksum;
public long saveTasks(DataOutputStream dos, long checksum) throws IOException { SerializeData data = new SerializeData(); data.tasks = new ArrayList<>(nameToTaskMap.values()); checksum ^= data.tasks.size(); data.runStatus = showTaskRunStatus(null); String s = GsonUtils.GSON.toJson(data); Text.writeString(dos, s); return checksum; }
class TaskManager implements Writable { private static final Logger LOG = LogManager.getLogger(TaskManager.class); public static final long TASK_EXISTS = -1L; public static final long DUPLICATE_CREATE_TASK = -2L; public static final long GET_TASK_LOCK_FAILED = -3L; private final Map<Long, Task> manualTaskMap; private final Map<String, Task> nameToTaskMap; private final TaskRunManager taskRunManager; private final ScheduledExecutorService dispatchScheduler = Executors.newScheduledThreadPool(1); private final QueryableReentrantLock lock; private AtomicBoolean isStart = new AtomicBoolean(false); public TaskManager() { manualTaskMap = Maps.newConcurrentMap(); nameToTaskMap = Maps.newConcurrentMap(); taskRunManager = new TaskRunManager(); lock = new QueryableReentrantLock(true); } public void start() { if (isStart.compareAndSet(false, true)) { clearUnfinishedTaskRun(); dispatchScheduler.scheduleAtFixedRate(() -> { if (!tryLock()) { return; } try { taskRunManager.checkRunningTaskRun(); taskRunManager.scheduledPendingTaskRun(); } catch (Exception ex) { LOG.warn("failed to dispatch job.", ex); } finally { unlock(); } }, 0, 1, TimeUnit.SECONDS); } } private void clearUnfinishedTaskRun() { if (!tryLock()) { return; } try { Iterator<Long> pendingIter = taskRunManager.getPendingTaskRunMap().keySet().iterator(); while (pendingIter.hasNext()) { Queue<TaskRun> taskRuns = taskRunManager.getPendingTaskRunMap().get(pendingIter.next()); for (TaskRun taskRun : taskRuns) { taskRun.getStatus().setErrorMessage("Fe restart abort the task"); taskRun.getStatus().setErrorCode(-1); taskRun.getStatus().setState(Constants.TaskRunState.FAILED); taskRunManager.getTaskRunHistory().addHistory(taskRun.getStatus()); } pendingIter.remove(); } Iterator<Long> runningIter = taskRunManager.getRunningTaskRunMap().keySet().iterator(); while (runningIter.hasNext()) { TaskRun taskRun = taskRunManager.getRunningTaskRunMap().get(runningIter.next()); taskRun.getStatus().setErrorMessage("Fe restart abort the 
task"); taskRun.getStatus().setErrorCode(-1); taskRun.getStatus().setState(Constants.TaskRunState.FAILED); runningIter.remove(); taskRunManager.getTaskRunHistory().addHistory(taskRun.getStatus()); } } finally { unlock(); } } public long createTask(Task task, boolean isReplay) { if (!tryLock()) { return GET_TASK_LOCK_FAILED; } try { if (nameToTaskMap.containsKey(task.getName())) { return TASK_EXISTS; } nameToTaskMap.put(task.getName(), task); if (manualTaskMap.containsKey(task.getId())) { return DUPLICATE_CREATE_TASK; } manualTaskMap.put(task.getId(), task); if (!isReplay) { GlobalStateMgr.getCurrentState().getEditLog().logCreateTask(task); } return task.getId(); } finally { unlock(); } } public SubmitResult executeTask(String taskName) { Task task = nameToTaskMap.get(taskName); if (task == null) { return new SubmitResult(null, SubmitResult.SubmitStatus.Failed); } return taskRunManager.submitTaskRun(TaskRunBuilder.newBuilder(task).build()); } public void dropTasks(List<Long> taskIdList, boolean isReplay) { if (!tryLock()) { return; } try { for (long taskId : taskIdList) { Task task = manualTaskMap.get(taskId); if (task == null) { return; } nameToTaskMap.remove(task.getName()); manualTaskMap.remove(task.getId()); } if (!isReplay) { GlobalStateMgr.getCurrentState().getEditLog().logDropTasks(taskIdList); } } finally { unlock(); } LOG.info("drop tasks:{}", taskIdList); } public List<Task> showTasks(String dbName) { List<Task> taskList = Lists.newArrayList(); if (dbName == null) { taskList.addAll(manualTaskMap.values()); } else { taskList.addAll(manualTaskMap.values().stream() .filter(u -> u.getDbName().equals(dbName)).collect(Collectors.toList())); } return taskList; } private boolean tryLock() { try { if (!lock.tryLock(1, TimeUnit.SECONDS)) { Thread owner = lock.getOwner(); if (owner != null) { LOG.warn("task lock is held by: {}", Util.dumpThread(owner, 50)); } else { LOG.warn("task lock owner is null"); } return false; } return true; } catch (InterruptedException e) { 
LOG.warn("got exception while getting task lock", e); } return lock.isHeldByCurrentThread(); } private void unlock() { this.lock.unlock(); } public void replayCreateTask(Task task) { createTask(task, true); } public void replayDropTasks(List<Long> taskIdList) { dropTasks(taskIdList, true); } public TaskRunManager getTaskRunManager() { return taskRunManager; } public ShowResultSet handleSubmitTaskStmt(SubmitTaskStmt submitTaskStmt) throws DdlException { Task task = TaskBuilder.buildTask(submitTaskStmt, ConnectContext.get()); Long createResult = createTask(task, false); String taskName = task.getName(); if (createResult < 0) { if (createResult == TASK_EXISTS) { throw new DdlException("Task " + taskName + " already exist."); } throw new DdlException("Failed to create Task: " + taskName + ", ErrorCode: " + createResult); } SubmitResult submitResult = executeTask(taskName); if (submitResult.getStatus() != SubmitResult.SubmitStatus.Submitted) { dropTasks(ImmutableList.of(task.getId()), false); } ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); builder.addColumn(new Column("TaskName", ScalarType.createVarchar(40))); builder.addColumn(new Column("Status", ScalarType.createVarchar(10))); List<String> item = ImmutableList.of(taskName, submitResult.getStatus().toString()); List<List<String>> result = ImmutableList.of(item); return new ShowResultSet(builder.build(), result); } public long loadTasks(DataInputStream dis, long checksum) throws IOException { try { readFields(dis); LOG.info("finished replaying TaskManager from image"); } catch (EOFException e) { LOG.info("no TaskManager to replay."); } return checksum; } @Override public void write(DataOutput out) throws IOException { SerializeData data = new SerializeData(); data.tasks = new ArrayList<>(nameToTaskMap.values()); data.runStatus = showTaskRunStatus(null); String s = GsonUtils.GSON.toJson(data); Text.writeString(out, s); } public void readFields(DataInputStream dis) throws IOException { String 
s = Text.readString(dis); SerializeData data = GsonUtils.GSON.fromJson(s, SerializeData.class); if (data != null) { if (data.tasks != null) { for (Task task : data.tasks) { replayCreateTask(task); } } if (data.runStatus != null) { for (TaskRunStatus runStatus : data.runStatus) { replayTaskRunCreateStatus(runStatus); } } } } public List<TaskRunStatus> showTaskRunStatus(String dbName) { List<TaskRunStatus> taskRunList = Lists.newArrayList(); if (dbName == null) { for (Queue<TaskRun> pTaskRunQueue : taskRunManager.getPendingTaskRunMap().values()) { taskRunList.addAll(pTaskRunQueue.stream().map(TaskRun::getStatus).collect(Collectors.toList())); } taskRunList.addAll(taskRunManager.getRunningTaskRunMap().values().stream().map(TaskRun::getStatus) .collect(Collectors.toList())); taskRunList.addAll(taskRunManager.getTaskRunHistory().getAllHistory()); } else { for (Queue<TaskRun> pTaskRunQueue : taskRunManager.getPendingTaskRunMap().values()) { taskRunList.addAll(pTaskRunQueue.stream().map(TaskRun::getStatus) .filter(u -> u.getDbName().equals(dbName)).collect(Collectors.toList())); } taskRunList.addAll(taskRunManager.getRunningTaskRunMap().values().stream().map(TaskRun::getStatus) .filter(u -> u.getDbName().equals(dbName)).collect(Collectors.toList())); taskRunList.addAll(taskRunManager.getTaskRunHistory().getAllHistory().stream() .filter(u -> u.getDbName().equals(dbName)).collect(Collectors.toList())); } return taskRunList; } public void replayTaskRunCreateStatus(TaskRunStatus status) { switch (status.getState()) { case PENDING: String taskName = status.getTaskName(); Task task = nameToTaskMap.get(taskName); if (task == null) { return; } TaskRun taskRun = TaskRunBuilder.newBuilder(task).build(); taskRun.initStatus(status.getQueryId(), status.getCreateTime()); Queue<TaskRun> taskRuns = taskRunManager.getPendingTaskRunMap().computeIfAbsent(taskRun.getTaskId(), u -> Queues.newConcurrentLinkedQueue()); taskRuns.offer(taskRun); break; case RUNNING: 
status.setState(Constants.TaskRunState.FAILED); taskRunManager.getTaskRunHistory().addHistory(status); break; case FAILED: case SUCCESS: taskRunManager.getTaskRunHistory().addHistory(status); break; } } public void replayTaskRunStatusChange(TaskRunStatusChange statusChange) { Constants.TaskRunState fromStatus = statusChange.getFromStatus(); Constants.TaskRunState toStatus = statusChange.getToStatus(); Long taskId = statusChange.getTaskId(); if (fromStatus == Constants.TaskRunState.PENDING) { if (toStatus == Constants.TaskRunState.RUNNING) { Queue<TaskRun> taskRunQueue = taskRunManager.getPendingTaskRunMap().get(taskId); if (taskRunQueue != null && taskRunQueue.size() != 0) { TaskRun pendingTaskRun = taskRunQueue.poll(); pendingTaskRun.getStatus().setState(Constants.TaskRunState.RUNNING); taskRunManager.getRunningTaskRunMap().put(taskId, pendingTaskRun); if (taskRunQueue.size() == 0) { taskRunManager.getPendingTaskRunMap().remove(taskId); } } } } else if (fromStatus == Constants.TaskRunState.RUNNING) { if (toStatus == Constants.TaskRunState.SUCCESS || toStatus == Constants.TaskRunState.FAILED) { TaskRun taskRun = taskRunManager.getRunningTaskRunMap().remove(taskId); if (taskRun == null) { return; } if (toStatus == Constants.TaskRunState.FAILED) { taskRun.getStatus().setErrorMessage(statusChange.getErrorMessage()); taskRun.getStatus().setErrorCode(statusChange.getErrorCode()); } taskRun.getStatus().setState(toStatus); taskRun.getStatus().setFinishTime(statusChange.getFinishTime()); taskRunManager.getTaskRunHistory().addHistory(taskRun.getStatus()); } } } public void replayDropTaskRuns(List<String> queryIdList) { Map<String, String> index = Maps.newHashMapWithExpectedSize(queryIdList.size()); for (String queryId : queryIdList) { index.put(queryId, null); } taskRunManager.getTaskRunHistory().getAllHistory().removeIf(runStatus -> index.containsKey(runStatus.getQueryId())); } public void removeOldTaskInfo() { long currentTimeMs = System.currentTimeMillis(); List<Task> 
currentTask = showTasks(null); List<Long> taskIdToDelete = Lists.newArrayList(); currentTask.sort((o1, o2) -> Long.signum(o1.getCreateTime() - o2.getCreateTime())); int labelKeepMaxSecond = Config.label_keep_max_second; int numTaskToRemove = currentTask.size() - Config.label_keep_max_num; for (Task task : currentTask) { if ((currentTimeMs - task.getCreateTime()) / 1000 > labelKeepMaxSecond || numTaskToRemove > 0) { taskIdToDelete.add(task.getId()); --numTaskToRemove; } } dropTasks(taskIdToDelete, true); } public void removeOldTaskRunHistory() { long currentTimeMs = System.currentTimeMillis(); Deque<TaskRunStatus> taskRunHistory = taskRunManager.getTaskRunHistory().getAllHistory(); List<String> historyToDelete = Lists.newArrayList(); if (!tryLock()) { return; } try { int labelKeepMaxSecond = Config.label_keep_max_second; int numHistoryToRemove = taskRunHistory.size() - Config.label_keep_max_num; for (TaskRunStatus runStatus : taskRunHistory) { long lastUpdateTime = runStatus.getCreateTime(); if (runStatus.getFinishTime() > lastUpdateTime) { lastUpdateTime = runStatus.getFinishTime(); } if ((currentTimeMs - lastUpdateTime) / 1000 > labelKeepMaxSecond || numHistoryToRemove > 0) { historyToDelete.add(runStatus.getQueryId()); taskRunHistory.remove(); --numHistoryToRemove; } } } finally { unlock(); } LOG.info("remove run history:{}", historyToDelete); } private static class SerializeData { @SerializedName("tasks") public List<Task> tasks; @SerializedName("runStatus") public List<TaskRunStatus> runStatus; } }
class TaskManager { private static final Logger LOG = LogManager.getLogger(TaskManager.class); public static final long TASK_EXISTS = -1L; public static final long DUPLICATE_CREATE_TASK = -2L; public static final long GET_TASK_LOCK_FAILED = -3L; private final Map<Long, Task> manualTaskMap; private final Map<String, Task> nameToTaskMap; private final TaskRunManager taskRunManager; private final ScheduledExecutorService dispatchScheduler = Executors.newScheduledThreadPool(1); private final QueryableReentrantLock lock; private AtomicBoolean isStart = new AtomicBoolean(false); public TaskManager() { manualTaskMap = Maps.newConcurrentMap(); nameToTaskMap = Maps.newConcurrentMap(); taskRunManager = new TaskRunManager(); lock = new QueryableReentrantLock(true); } public void start() { if (isStart.compareAndSet(false, true)) { clearUnfinishedTaskRun(); dispatchScheduler.scheduleAtFixedRate(() -> { if (!tryLock()) { return; } try { taskRunManager.checkRunningTaskRun(); taskRunManager.scheduledPendingTaskRun(); } catch (Exception ex) { LOG.warn("failed to dispatch job.", ex); } finally { unlock(); } }, 0, 1, TimeUnit.SECONDS); } } private void clearUnfinishedTaskRun() { if (!tryLock()) { return; } try { Iterator<Long> pendingIter = taskRunManager.getPendingTaskRunMap().keySet().iterator(); while (pendingIter.hasNext()) { Queue<TaskRun> taskRuns = taskRunManager.getPendingTaskRunMap().get(pendingIter.next()); for (TaskRun taskRun : taskRuns) { taskRun.getStatus().setErrorMessage("Fe restart abort the task"); taskRun.getStatus().setErrorCode(-1); taskRun.getStatus().setState(Constants.TaskRunState.FAILED); taskRunManager.getTaskRunHistory().addHistory(taskRun.getStatus()); } pendingIter.remove(); } Iterator<Long> runningIter = taskRunManager.getRunningTaskRunMap().keySet().iterator(); while (runningIter.hasNext()) { TaskRun taskRun = taskRunManager.getRunningTaskRunMap().get(runningIter.next()); taskRun.getStatus().setErrorMessage("Fe restart abort the task"); 
taskRun.getStatus().setErrorCode(-1); taskRun.getStatus().setState(Constants.TaskRunState.FAILED); runningIter.remove(); taskRunManager.getTaskRunHistory().addHistory(taskRun.getStatus()); } } finally { unlock(); } } public long createTask(Task task, boolean isReplay) { if (!tryLock()) { return GET_TASK_LOCK_FAILED; } try { if (nameToTaskMap.containsKey(task.getName())) { return TASK_EXISTS; } nameToTaskMap.put(task.getName(), task); if (manualTaskMap.containsKey(task.getId())) { return DUPLICATE_CREATE_TASK; } manualTaskMap.put(task.getId(), task); if (!isReplay) { GlobalStateMgr.getCurrentState().getEditLog().logCreateTask(task); } return task.getId(); } finally { unlock(); } } public SubmitResult executeTask(String taskName) { Task task = nameToTaskMap.get(taskName); if (task == null) { return new SubmitResult(null, SubmitResult.SubmitStatus.FAILED); } return taskRunManager.submitTaskRun(TaskRunBuilder.newBuilder(task).build()); } public void dropTasks(List<Long> taskIdList, boolean isReplay) { if (!tryLock()) { return; } try { for (long taskId : taskIdList) { Task task = manualTaskMap.get(taskId); if (task == null) { LOG.warn("drop taskId {} failed because task is null", taskId); continue; } nameToTaskMap.remove(task.getName()); manualTaskMap.remove(task.getId()); } if (!isReplay) { GlobalStateMgr.getCurrentState().getEditLog().logDropTasks(taskIdList); } } finally { unlock(); } LOG.info("drop tasks:{}", taskIdList); } public List<Task> showTasks(String dbName) { List<Task> taskList = Lists.newArrayList(); if (dbName == null) { taskList.addAll(manualTaskMap.values()); } else { taskList.addAll(manualTaskMap.values().stream() .filter(u -> u.getDbName().equals(dbName)).collect(Collectors.toList())); } return taskList; } private boolean tryLock() { try { if (!lock.tryLock(1, TimeUnit.SECONDS)) { Thread owner = lock.getOwner(); if (owner != null) { LOG.warn("task lock is held by: {}", Util.dumpThread(owner, 50)); } else { LOG.warn("task lock owner is null"); } 
return false; } return true; } catch (InterruptedException e) { LOG.warn("got exception while getting task lock", e); } return lock.isHeldByCurrentThread(); } private void unlock() { this.lock.unlock(); } public void replayCreateTask(Task task) { if ((System.currentTimeMillis() - task.getCreateTime()) / 1000 > Config.label_keep_max_second) { return; } createTask(task, true); } public void replayDropTasks(List<Long> taskIdList) { dropTasks(taskIdList, true); } public TaskRunManager getTaskRunManager() { return taskRunManager; } public ShowResultSet handleSubmitTaskStmt(SubmitTaskStmt submitTaskStmt) throws DdlException { Task task = TaskBuilder.buildTask(submitTaskStmt, ConnectContext.get()); long createResult = createTask(task, false); String taskName = task.getName(); if (createResult < 0) { if (createResult == TASK_EXISTS) { throw new DdlException("Task " + taskName + " already exist."); } throw new DdlException("Failed to create Task: " + taskName + ", ErrorCode: " + createResult); } SubmitResult submitResult = executeTask(taskName); if (submitResult.getStatus() != SubmitResult.SubmitStatus.SUBMITTED) { dropTasks(ImmutableList.of(task.getId()), false); } ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); builder.addColumn(new Column("TaskName", ScalarType.createVarchar(40))); builder.addColumn(new Column("Status", ScalarType.createVarchar(10))); List<String> item = ImmutableList.of(taskName, submitResult.getStatus().toString()); List<List<String>> result = ImmutableList.of(item); return new ShowResultSet(builder.build(), result); } public long loadTasks(DataInputStream dis, long checksum) throws IOException { int taskCount = 0; try { String s = Text.readString(dis); SerializeData data = GsonUtils.GSON.fromJson(s, SerializeData.class); if (data != null) { if (data.tasks != null) { for (Task task : data.tasks) { replayCreateTask(task); } taskCount = data.tasks.size(); } if (data.runStatus != null) { for (TaskRunStatus runStatus : 
data.runStatus) { replayCreateTaskRun(runStatus); } } } checksum ^= taskCount; LOG.info("finished replaying TaskManager from image"); } catch (EOFException e) { LOG.info("no TaskManager to replay."); } return checksum; } public List<TaskRunStatus> showTaskRunStatus(String dbName) { List<TaskRunStatus> taskRunList = Lists.newArrayList(); if (dbName == null) { for (Queue<TaskRun> pTaskRunQueue : taskRunManager.getPendingTaskRunMap().values()) { taskRunList.addAll(pTaskRunQueue.stream().map(TaskRun::getStatus).collect(Collectors.toList())); } taskRunList.addAll(taskRunManager.getRunningTaskRunMap().values().stream().map(TaskRun::getStatus) .collect(Collectors.toList())); taskRunList.addAll(taskRunManager.getTaskRunHistory().getAllHistory()); } else { for (Queue<TaskRun> pTaskRunQueue : taskRunManager.getPendingTaskRunMap().values()) { taskRunList.addAll(pTaskRunQueue.stream().map(TaskRun::getStatus) .filter(u -> u.getDbName().equals(dbName)).collect(Collectors.toList())); } taskRunList.addAll(taskRunManager.getRunningTaskRunMap().values().stream().map(TaskRun::getStatus) .filter(u -> u.getDbName().equals(dbName)).collect(Collectors.toList())); taskRunList.addAll(taskRunManager.getTaskRunHistory().getAllHistory().stream() .filter(u -> u.getDbName().equals(dbName)).collect(Collectors.toList())); } return taskRunList; } public void replayCreateTaskRun(TaskRunStatus status) { if (status.getState() == Constants.TaskRunState.SUCCESS || status.getState() == Constants.TaskRunState.FAILED) { long lastUpdateTime = status.getCreateTime(); if (status.getFinishTime() > lastUpdateTime) { lastUpdateTime = status.getFinishTime(); } if ((System.currentTimeMillis() - lastUpdateTime) / 1000 > Config.label_keep_max_second) { return; } } switch (status.getState()) { case PENDING: String taskName = status.getTaskName(); Task task = nameToTaskMap.get(taskName); if (task == null) { LOG.warn("fail to obtain task name {} because task is null", taskName); return; } TaskRun taskRun = 
TaskRunBuilder.newBuilder(task).build(); taskRun.initStatus(status.getQueryId(), status.getCreateTime()); Queue<TaskRun> taskRuns = taskRunManager.getPendingTaskRunMap().computeIfAbsent(taskRun.getTaskId(), u -> Queues.newConcurrentLinkedQueue()); taskRuns.offer(taskRun); break; case RUNNING: status.setState(Constants.TaskRunState.FAILED); taskRunManager.getTaskRunHistory().addHistory(status); break; case FAILED: case SUCCESS: taskRunManager.getTaskRunHistory().addHistory(status); break; } } public void replayUpdateTaskRun(TaskRunStatusChange statusChange) { Constants.TaskRunState toStatus = statusChange.getToStatus(); Long taskId = statusChange.getTaskId(); Queue<TaskRun> taskRunQueue = taskRunManager.getPendingTaskRunMap().get(taskId); if (taskRunQueue != null) { if (taskRunQueue.size() == 0) { taskRunManager.getPendingTaskRunMap().remove(taskId); return; } TaskRun pendingTaskRun = taskRunQueue.poll(); TaskRunStatus status = pendingTaskRun.getStatus(); if (status.getQueryId().equals(statusChange.getQueryId())) { if (toStatus == Constants.TaskRunState.FAILED) { status.setErrorMessage(statusChange.getErrorMessage()); status.setErrorCode(statusChange.getErrorCode()); } status.setState(toStatus); status.setFinishTime(statusChange.getFinishTime()); taskRunManager.getTaskRunHistory().addHistory(status); } } } public void replayDropTaskRuns(List<String> queryIdList) { Map<String, String> index = Maps.newHashMapWithExpectedSize(queryIdList.size()); for (String queryId : queryIdList) { index.put(queryId, null); } taskRunManager.getTaskRunHistory().getAllHistory().removeIf(runStatus -> index.containsKey(runStatus.getQueryId())); } public void removeOldTaskInfo() { long currentTimeMs = System.currentTimeMillis(); List<Task> currentTask = showTasks(null); List<Long> taskIdToDelete = Lists.newArrayList(); currentTask.sort((o1, o2) -> Long.signum(o1.getCreateTime() - o2.getCreateTime())); int labelKeepMaxSecond = Config.label_keep_max_second; int numTaskToRemove = 
currentTask.size() - Config.label_keep_max_num; for (Task task : currentTask) { if ((currentTimeMs - task.getCreateTime()) / 1000 > labelKeepMaxSecond || numTaskToRemove > 0) { taskIdToDelete.add(task.getId()); --numTaskToRemove; } } dropTasks(taskIdToDelete, true); } public void removeOldTaskRunHistory() { long currentTimeMs = System.currentTimeMillis(); Deque<TaskRunStatus> taskRunHistory = taskRunManager.getTaskRunHistory().getAllHistory(); List<String> historyToDelete = Lists.newArrayList(); if (!tryLock()) { return; } try { int labelKeepMaxSecond = Config.label_keep_max_second; int numHistoryToRemove = taskRunHistory.size() - Config.label_keep_max_num; for (TaskRunStatus runStatus : taskRunHistory) { long lastUpdateTime = runStatus.getCreateTime(); if (runStatus.getFinishTime() > lastUpdateTime) { lastUpdateTime = runStatus.getFinishTime(); } if ((currentTimeMs - lastUpdateTime) / 1000 > labelKeepMaxSecond || numHistoryToRemove > 0) { historyToDelete.add(runStatus.getQueryId()); taskRunHistory.remove(); --numHistoryToRemove; } } } finally { unlock(); } LOG.info("remove run history:{}", historyToDelete); } private static class SerializeData { @SerializedName("tasks") public List<Task> tasks; @SerializedName("runStatus") public List<TaskRunStatus> runStatus; } }
consider checking if the validity of the nixes are the same (we should also update this in C++)
public boolean equalTo(Inspector that) { boolean equal = type() == that.type(); if (equal) { switch (type()) { case NIX: break; case BOOL: equal = asBool() == that.asBool(); break; case LONG: equal = asLong() == that.asLong(); break; case DOUBLE: equal = Double.compare(asDouble(), that.asDouble()) == 0; break; case STRING: equal = asString().equals(that.asString()); break; case DATA: equal = Arrays.equals(asData(), that.asData()); break; case ARRAY: { var traverser = new EqualArray(that); traverse(traverser); equal = traverser.isEqual() && (entries() == that.entries()); } break; case OBJECT: { var traverser = new EqualObject(that); traverse(traverser); equal = traverser.isEqual() && (fields() == that.fields()); } break; default: assert(false); break; } } return equal; }
break;
public boolean equalTo(Inspector that) { boolean equal = type() == that.type(); if (equal) { switch (type()) { case NIX: equal = valid() == that.valid(); break; case BOOL: equal = asBool() == that.asBool(); break; case LONG: equal = asLong() == that.asLong(); break; case DOUBLE: equal = Double.compare(asDouble(), that.asDouble()) == 0; break; case STRING: equal = asString().equals(that.asString()); break; case DATA: equal = Arrays.equals(asData(), that.asData()); break; case ARRAY: { var traverser = new EqualArray(that); traverse(traverser); equal = traverser.isEqual() && (entries() == that.entries()); } break; case OBJECT: { var traverser = new EqualObject(that); traverse(traverser); equal = traverser.isEqual() && (fields() == that.fields()); } break; default: assert(false); break; } } return equal; }
class EqualObject extends Equal implements ObjectTraverser { public EqualObject(Inspector rhsInspector) { super(rhsInspector); } @Override public void field(String name, Inspector inspector) { if (equal) { equal = inspector.equalTo(rhsInspector.field(name)); } } }
class EqualObject extends Equal implements ObjectTraverser { public EqualObject(Inspector rhsInspector) { super(rhsInspector); } @Override public void field(String name, Inspector inspector) { if (equal) { equal = inspector.equalTo(rhsInspector.field(name)); } } }
Done
public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } var inserter = new ArrayInserter(); inserter.adjust(cursor); injectValue(inserter, inspector, guard); }
inserter.adjust(cursor);
public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ArrayInserter(cursor), inspector, guard); }
class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override @Override public void field(String name, Inspector inspector) { if (inspector == guard) { return; } var inserter = new ObjectInserter(); inserter.adjust(cursor, name); injectValue(inserter, inspector, guard); } }
class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override @Override public void field(String name, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ObjectInserter(cursor, name), inspector, guard); } }
Done
public void field(String name, Inspector inspector) { if (inspector == guard) { return; } var inserter = new ObjectInserter(); inserter.adjust(cursor, name); injectValue(inserter, inspector, guard); }
inserter.adjust(cursor, name);
public void field(String name, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ObjectInserter(cursor, name), inspector, guard); }
class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } var inserter = new ArrayInserter(); inserter.adjust(cursor); injectValue(inserter, inspector, guard); } @Override }
class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ArrayInserter(cursor), inspector, guard); } @Override }
Done
public boolean equalTo(Inspector that) { boolean equal = type() == that.type(); if (equal) { switch (type()) { case NIX: break; case BOOL: equal = asBool() == that.asBool(); break; case LONG: equal = asLong() == that.asLong(); break; case DOUBLE: equal = Double.compare(asDouble(), that.asDouble()) == 0; break; case STRING: equal = asString().equals(that.asString()); break; case DATA: equal = Arrays.equals(asData(), that.asData()); break; case ARRAY: { var traverser = new EqualArray(that); traverse(traverser); equal = traverser.isEqual() && (entries() == that.entries()); } break; case OBJECT: { var traverser = new EqualObject(that); traverse(traverser); equal = traverser.isEqual() && (fields() == that.fields()); } break; default: assert(false); break; } } return equal; }
break;
public boolean equalTo(Inspector that) { boolean equal = type() == that.type(); if (equal) { switch (type()) { case NIX: equal = valid() == that.valid(); break; case BOOL: equal = asBool() == that.asBool(); break; case LONG: equal = asLong() == that.asLong(); break; case DOUBLE: equal = Double.compare(asDouble(), that.asDouble()) == 0; break; case STRING: equal = asString().equals(that.asString()); break; case DATA: equal = Arrays.equals(asData(), that.asData()); break; case ARRAY: { var traverser = new EqualArray(that); traverse(traverser); equal = traverser.isEqual() && (entries() == that.entries()); } break; case OBJECT: { var traverser = new EqualObject(that); traverse(traverser); equal = traverser.isEqual() && (fields() == that.fields()); } break; default: assert(false); break; } } return equal; }
class EqualObject extends Equal implements ObjectTraverser { public EqualObject(Inspector rhsInspector) { super(rhsInspector); } @Override public void field(String name, Inspector inspector) { if (equal) { equal = inspector.equalTo(rhsInspector.field(name)); } } }
class EqualObject extends Equal implements ObjectTraverser { public EqualObject(Inspector rhsInspector) { super(rhsInspector); } @Override public void field(String name, Inspector inspector) { if (equal) { equal = inspector.equalTo(rhsInspector.field(name)); } } }
having this means we can drop the injectArray method
private void injectValue(Inserter inserter, Inspector inspector, Inspector guard) { inspector.accept(new Visitor() { @Override public void visitInvalid() { } @Override public void visitNix() { inserter.insertNIX(); } @Override public void visitBool(boolean bit) { inserter.insertBOOL(bit); } @Override public void visitLong(long l) { inserter.insertLONG(l); } @Override public void visitDouble(double d) { inserter.insertDOUBLE(d); } @Override public void visitString(String str) { inserter.insertSTRING(str); } @Override public void visitString(byte[] utf8) { inserter.insertSTRING(utf8); } @Override public void visitData(byte[] data) { inserter.insertDATA(data); } @Override public void visitArray(Inspector arr) { Cursor cursor = inserter.insertARRAY(); ArrayTraverser arrayTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); arr.traverse(arrayTraverser); } @Override public void visitObject(Inspector obj) { Cursor cursor = inserter.insertOBJECT(); ObjectTraverser objectTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); obj.traverse(objectTraverser); } }); }
public void visitArray(Inspector arr) {
private void injectValue(Inserter inserter, Inspector inspector, Inspector guard) { inspector.accept(new Visitor() { @Override public void visitInvalid() { } @Override public void visitNix() { inserter.insertNIX(); } @Override public void visitBool(boolean bit) { inserter.insertBOOL(bit); } @Override public void visitLong(long l) { inserter.insertLONG(l); } @Override public void visitDouble(double d) { inserter.insertDOUBLE(d); } @Override public void visitString(String str) { inserter.insertSTRING(str); } @Override public void visitString(byte[] utf8) { inserter.insertSTRING(utf8); } @Override public void visitData(byte[] data) { inserter.insertDATA(data); } @Override public void visitArray(Inspector arr) { Cursor cursor = inserter.insertARRAY(); ArrayTraverser arrayTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); arr.traverse(arrayTraverser); } @Override public void visitObject(Inspector obj) { Cursor cursor = inserter.insertOBJECT(); ObjectTraverser objectTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); obj.traverse(objectTraverser); } }); }
class Injector { /** * Inject a slime sub-structure described by an Inspector into a slime * structure where the insertion point is described by an * Inserter. This will copy all the values represented by the * Inspector into the position described by the Inserter. Note that * this can be used to either copy data from one Slime structure to * another, or to copy data internally within a single slime * structure. If the Inspector contains the insertion point it will * only be expanded once to avoid infinite recursion. * * @param inspector what to inject * @param inserter where to inject **/ public void inject(Inspector inspector, Inserter inserter) { if (inspector.valid()) { injectValue(inserter, inspector, null); } } private void injectArray(Inserter inserter, Inspector inspector, Inspector guard) { Cursor cursor = inserter.insertARRAY(); ArrayTraverser arrayTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); inspector.traverse(arrayTraverser); } private void injectObject(Inserter inserter, Inspector inspector, Inspector guard) { Cursor cursor = inserter.insertOBJECT(); ObjectTraverser objectTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); inspector.traverse(objectTraverser); } private class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ArrayInserter(cursor), inspector, guard); } @Override public void field(String name, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ObjectInserter(cursor, name), inspector, guard); } } }
class Injector { /** * Inject a slime sub-structure described by an Inspector into a slime * structure where the insertion point is described by an * Inserter. This will copy all the values represented by the * Inspector into the position described by the Inserter. Note that * this can be used to either copy data from one Slime structure to * another, or to copy data internally within a single slime * structure. If the Inspector contains the insertion point it will * only be expanded once to avoid infinite recursion. * * @param inspector what to inject * @param inserter where to inject **/ public void inject(Inspector inspector, Inserter inserter) { if (inspector.valid()) { injectValue(inserter, inspector, null); } } private class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ArrayInserter(cursor), inspector, guard); } @Override public void field(String name, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ObjectInserter(cursor, name), inspector, guard); } } }
having this means we can drop the injectObject method
private void injectValue(Inserter inserter, Inspector inspector, Inspector guard) { inspector.accept(new Visitor() { @Override public void visitInvalid() { } @Override public void visitNix() { inserter.insertNIX(); } @Override public void visitBool(boolean bit) { inserter.insertBOOL(bit); } @Override public void visitLong(long l) { inserter.insertLONG(l); } @Override public void visitDouble(double d) { inserter.insertDOUBLE(d); } @Override public void visitString(String str) { inserter.insertSTRING(str); } @Override public void visitString(byte[] utf8) { inserter.insertSTRING(utf8); } @Override public void visitData(byte[] data) { inserter.insertDATA(data); } @Override public void visitArray(Inspector arr) { Cursor cursor = inserter.insertARRAY(); ArrayTraverser arrayTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); arr.traverse(arrayTraverser); } @Override public void visitObject(Inspector obj) { Cursor cursor = inserter.insertOBJECT(); ObjectTraverser objectTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); obj.traverse(objectTraverser); } }); }
public void visitObject(Inspector obj) {
private void injectValue(Inserter inserter, Inspector inspector, Inspector guard) { inspector.accept(new Visitor() { @Override public void visitInvalid() { } @Override public void visitNix() { inserter.insertNIX(); } @Override public void visitBool(boolean bit) { inserter.insertBOOL(bit); } @Override public void visitLong(long l) { inserter.insertLONG(l); } @Override public void visitDouble(double d) { inserter.insertDOUBLE(d); } @Override public void visitString(String str) { inserter.insertSTRING(str); } @Override public void visitString(byte[] utf8) { inserter.insertSTRING(utf8); } @Override public void visitData(byte[] data) { inserter.insertDATA(data); } @Override public void visitArray(Inspector arr) { Cursor cursor = inserter.insertARRAY(); ArrayTraverser arrayTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); arr.traverse(arrayTraverser); } @Override public void visitObject(Inspector obj) { Cursor cursor = inserter.insertOBJECT(); ObjectTraverser objectTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); obj.traverse(objectTraverser); } }); }
class Injector { /** * Inject a slime sub-structure described by an Inspector into a slime * structure where the insertion point is described by an * Inserter. This will copy all the values represented by the * Inspector into the position described by the Inserter. Note that * this can be used to either copy data from one Slime structure to * another, or to copy data internally within a single slime * structure. If the Inspector contains the insertion point it will * only be expanded once to avoid infinite recursion. * * @param inspector what to inject * @param inserter where to inject **/ public void inject(Inspector inspector, Inserter inserter) { if (inspector.valid()) { injectValue(inserter, inspector, null); } } private void injectArray(Inserter inserter, Inspector inspector, Inspector guard) { Cursor cursor = inserter.insertARRAY(); ArrayTraverser arrayTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); inspector.traverse(arrayTraverser); } private void injectObject(Inserter inserter, Inspector inspector, Inspector guard) { Cursor cursor = inserter.insertOBJECT(); ObjectTraverser objectTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); inspector.traverse(objectTraverser); } private class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ArrayInserter(cursor), inspector, guard); } @Override public void field(String name, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ObjectInserter(cursor, name), inspector, guard); } } }
class Injector { /** * Inject a slime sub-structure described by an Inspector into a slime * structure where the insertion point is described by an * Inserter. This will copy all the values represented by the * Inspector into the position described by the Inserter. Note that * this can be used to either copy data from one Slime structure to * another, or to copy data internally within a single slime * structure. If the Inspector contains the insertion point it will * only be expanded once to avoid infinite recursion. * * @param inspector what to inject * @param inserter where to inject **/ public void inject(Inspector inspector, Inserter inserter) { if (inspector.valid()) { injectValue(inserter, inspector, null); } } private class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ArrayInserter(cursor), inspector, guard); } @Override public void field(String name, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ObjectInserter(cursor, name), inspector, guard); } } }
Done
private void injectValue(Inserter inserter, Inspector inspector, Inspector guard) { inspector.accept(new Visitor() { @Override public void visitInvalid() { } @Override public void visitNix() { inserter.insertNIX(); } @Override public void visitBool(boolean bit) { inserter.insertBOOL(bit); } @Override public void visitLong(long l) { inserter.insertLONG(l); } @Override public void visitDouble(double d) { inserter.insertDOUBLE(d); } @Override public void visitString(String str) { inserter.insertSTRING(str); } @Override public void visitString(byte[] utf8) { inserter.insertSTRING(utf8); } @Override public void visitData(byte[] data) { inserter.insertDATA(data); } @Override public void visitArray(Inspector arr) { Cursor cursor = inserter.insertARRAY(); ArrayTraverser arrayTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); arr.traverse(arrayTraverser); } @Override public void visitObject(Inspector obj) { Cursor cursor = inserter.insertOBJECT(); ObjectTraverser objectTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); obj.traverse(objectTraverser); } }); }
public void visitArray(Inspector arr) {
private void injectValue(Inserter inserter, Inspector inspector, Inspector guard) { inspector.accept(new Visitor() { @Override public void visitInvalid() { } @Override public void visitNix() { inserter.insertNIX(); } @Override public void visitBool(boolean bit) { inserter.insertBOOL(bit); } @Override public void visitLong(long l) { inserter.insertLONG(l); } @Override public void visitDouble(double d) { inserter.insertDOUBLE(d); } @Override public void visitString(String str) { inserter.insertSTRING(str); } @Override public void visitString(byte[] utf8) { inserter.insertSTRING(utf8); } @Override public void visitData(byte[] data) { inserter.insertDATA(data); } @Override public void visitArray(Inspector arr) { Cursor cursor = inserter.insertARRAY(); ArrayTraverser arrayTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); arr.traverse(arrayTraverser); } @Override public void visitObject(Inspector obj) { Cursor cursor = inserter.insertOBJECT(); ObjectTraverser objectTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); obj.traverse(objectTraverser); } }); }
class Injector { /** * Inject a slime sub-structure described by an Inspector into a slime * structure where the insertion point is described by an * Inserter. This will copy all the values represented by the * Inspector into the position described by the Inserter. Note that * this can be used to either copy data from one Slime structure to * another, or to copy data internally within a single slime * structure. If the Inspector contains the insertion point it will * only be expanded once to avoid infinite recursion. * * @param inspector what to inject * @param inserter where to inject **/ public void inject(Inspector inspector, Inserter inserter) { if (inspector.valid()) { injectValue(inserter, inspector, null); } } private void injectArray(Inserter inserter, Inspector inspector, Inspector guard) { Cursor cursor = inserter.insertARRAY(); ArrayTraverser arrayTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); inspector.traverse(arrayTraverser); } private void injectObject(Inserter inserter, Inspector inspector, Inspector guard) { Cursor cursor = inserter.insertOBJECT(); ObjectTraverser objectTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); inspector.traverse(objectTraverser); } private class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ArrayInserter(cursor), inspector, guard); } @Override public void field(String name, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ObjectInserter(cursor, name), inspector, guard); } } }
class Injector { /** * Inject a slime sub-structure described by an Inspector into a slime * structure where the insertion point is described by an * Inserter. This will copy all the values represented by the * Inspector into the position described by the Inserter. Note that * this can be used to either copy data from one Slime structure to * another, or to copy data internally within a single slime * structure. If the Inspector contains the insertion point it will * only be expanded once to avoid infinite recursion. * * @param inspector what to inject * @param inserter where to inject **/ public void inject(Inspector inspector, Inserter inserter) { if (inspector.valid()) { injectValue(inserter, inspector, null); } } private class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ArrayInserter(cursor), inspector, guard); } @Override public void field(String name, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ObjectInserter(cursor, name), inspector, guard); } } }
Done
private void injectValue(Inserter inserter, Inspector inspector, Inspector guard) { inspector.accept(new Visitor() { @Override public void visitInvalid() { } @Override public void visitNix() { inserter.insertNIX(); } @Override public void visitBool(boolean bit) { inserter.insertBOOL(bit); } @Override public void visitLong(long l) { inserter.insertLONG(l); } @Override public void visitDouble(double d) { inserter.insertDOUBLE(d); } @Override public void visitString(String str) { inserter.insertSTRING(str); } @Override public void visitString(byte[] utf8) { inserter.insertSTRING(utf8); } @Override public void visitData(byte[] data) { inserter.insertDATA(data); } @Override public void visitArray(Inspector arr) { Cursor cursor = inserter.insertARRAY(); ArrayTraverser arrayTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); arr.traverse(arrayTraverser); } @Override public void visitObject(Inspector obj) { Cursor cursor = inserter.insertOBJECT(); ObjectTraverser objectTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); obj.traverse(objectTraverser); } }); }
public void visitObject(Inspector obj) {
private void injectValue(Inserter inserter, Inspector inspector, Inspector guard) { inspector.accept(new Visitor() { @Override public void visitInvalid() { } @Override public void visitNix() { inserter.insertNIX(); } @Override public void visitBool(boolean bit) { inserter.insertBOOL(bit); } @Override public void visitLong(long l) { inserter.insertLONG(l); } @Override public void visitDouble(double d) { inserter.insertDOUBLE(d); } @Override public void visitString(String str) { inserter.insertSTRING(str); } @Override public void visitString(byte[] utf8) { inserter.insertSTRING(utf8); } @Override public void visitData(byte[] data) { inserter.insertDATA(data); } @Override public void visitArray(Inspector arr) { Cursor cursor = inserter.insertARRAY(); ArrayTraverser arrayTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); arr.traverse(arrayTraverser); } @Override public void visitObject(Inspector obj) { Cursor cursor = inserter.insertOBJECT(); ObjectTraverser objectTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); obj.traverse(objectTraverser); } }); }
class Injector { /** * Inject a slime sub-structure described by an Inspector into a slime * structure where the insertion point is described by an * Inserter. This will copy all the values represented by the * Inspector into the position described by the Inserter. Note that * this can be used to either copy data from one Slime structure to * another, or to copy data internally within a single slime * structure. If the Inspector contains the insertion point it will * only be expanded once to avoid infinite recursion. * * @param inspector what to inject * @param inserter where to inject **/ public void inject(Inspector inspector, Inserter inserter) { if (inspector.valid()) { injectValue(inserter, inspector, null); } } private void injectArray(Inserter inserter, Inspector inspector, Inspector guard) { Cursor cursor = inserter.insertARRAY(); ArrayTraverser arrayTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); inspector.traverse(arrayTraverser); } private void injectObject(Inserter inserter, Inspector inspector, Inspector guard) { Cursor cursor = inserter.insertOBJECT(); ObjectTraverser objectTraverser = new NestedInjector(cursor, guard != null ? guard : cursor); inspector.traverse(objectTraverser); } private class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ArrayInserter(cursor), inspector, guard); } @Override public void field(String name, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ObjectInserter(cursor, name), inspector, guard); } } }
class Injector { /** * Inject a slime sub-structure described by an Inspector into a slime * structure where the insertion point is described by an * Inserter. This will copy all the values represented by the * Inspector into the position described by the Inserter. Note that * this can be used to either copy data from one Slime structure to * another, or to copy data internally within a single slime * structure. If the Inspector contains the insertion point it will * only be expanded once to avoid infinite recursion. * * @param inspector what to inject * @param inserter where to inject **/ public void inject(Inspector inspector, Inserter inserter) { if (inspector.valid()) { injectValue(inserter, inspector, null); } } private class NestedInjector implements ArrayTraverser, ObjectTraverser { private final Cursor cursor; private final Inspector guard; public NestedInjector(Cursor cursor, Inspector guard) { this.cursor = cursor; this.guard = guard; } @Override public void entry(int idx, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ArrayInserter(cursor), inspector, guard); } @Override public void field(String name, Inspector inspector) { if (inspector == guard) { return; } injectValue(new ObjectInserter(cursor, name), inspector, guard); } } }
Nitpick: Consider rename 'operand' -> 'tensor' to match TensorModifyUpdate.
protected ValueUpdate readTensorModifyUpdate(DataType type) { byte operationId = getByte(null); TensorModifyUpdate.Operation operation = TensorModifyUpdate.Operation.getID(operationId); if (operation == null) { throw new DeserializationException("Unknown operation id " + operationId + " for tensor modify update"); } FieldValue fieldValue = type.createFieldValue(); if (!(fieldValue instanceof TensorFieldValue)) { throw new DeserializationException("Expected tensor field value, got " + type); } TensorFieldValue operand = (TensorFieldValue) fieldValue; operand.deserialize(this); return new TensorModifyUpdate(operation, operand); }
TensorFieldValue operand = (TensorFieldValue) fieldValue;
protected ValueUpdate readTensorModifyUpdate(DataType type) { byte operationId = getByte(null); TensorModifyUpdate.Operation operation = TensorModifyUpdate.Operation.getOperation(operationId); if (operation == null) { throw new DeserializationException("Unknown operation id " + operationId + " for tensor modify update"); } FieldValue fieldValue = type.createFieldValue(); if (!(fieldValue instanceof TensorFieldValue)) { throw new DeserializationException("Expected tensor field value, got " + type); } TensorFieldValue tensor = (TensorFieldValue) fieldValue; tensor.deserialize(this); return new TensorModifyUpdate(operation, tensor); }
class VespaDocumentDeserializerHead extends VespaDocumentDeserializer6 { public VespaDocumentDeserializerHead(DocumentTypeManager manager, GrowableByteBuffer buffer) { super(manager, buffer); } @Override }
class VespaDocumentDeserializerHead extends VespaDocumentDeserializer6 { public VespaDocumentDeserializerHead(DocumentTypeManager manager, GrowableByteBuffer buffer) { super(manager, buffer); } @Override }
I would check the operation and tensor separately instead of depending on toString() format.
public void testTensorModifySerialization() { FieldUpdate update = new FieldUpdate(tensorField); TensorFieldValue tensor = createTensor(tensorType, "{{x:8,y:9}:2}"); update.addValueUpdate(new TensorModifyUpdate(TensorModifyUpdate.Operation.REPLACE, tensor)); GrowableByteBuffer buffer = serializeUpdate(update); FieldUpdate deserializedUpdate = deserializeUpdate(buffer); assertEquals("'tensorfield' [tensormodify replace TensorFieldValue {\n classId: 1\n}\n]", deserializedUpdate.toString()); assertEquals(update, deserializedUpdate); }
assertEquals("'tensorfield' [tensormodify replace TensorFieldValue {\n classId: 1\n}\n]", deserializedUpdate.toString());
public void testTensorModifySerialization() { FieldUpdate update = new FieldUpdate(tensorField); TensorFieldValue tensor = createTensor(tensorType, "{{x:8,y:9}:2}"); update.addValueUpdate(new TensorModifyUpdate(TensorModifyUpdate.Operation.REPLACE, tensor)); GrowableByteBuffer buffer = serializeUpdate(update); FieldUpdate deserializedUpdate = deserializeUpdate(buffer); assertEquals("tensorfield", deserializedUpdate.getField().getName()); assertEquals(1, deserializedUpdate.getValueUpdates().size()); ValueUpdate valueUpdate = deserializedUpdate.getValueUpdate(0); if (!(valueUpdate instanceof TensorModifyUpdate)) { throw new IllegalStateException("Expected tensorModifyUpdate"); } TensorModifyUpdate tensorModifyUpdate = (TensorModifyUpdate) valueUpdate; assertEquals(TensorModifyUpdate.Operation.REPLACE, tensorModifyUpdate.getOperation()); assertEquals(tensor, tensorModifyUpdate.getValue()); assertEquals(update, deserializedUpdate); }
class SerializationTestCase { private DocumentType documentType; private Field field; private final static TensorType tensorType = new TensorType.Builder().mapped("x").mapped("y").build(); private Field tensorField; @Before public void setUp() { documentType = new DocumentType("document1"); field = new Field("field1", DataType.getArray(DataType.STRING)); documentType.addField(field); tensorField = new Field("tensorfield", new TensorDataType(tensorType)); documentType.addField(tensorField); } @Test public void testAddSerialization() { FieldUpdate update = FieldUpdate.createAdd(field, new StringFieldValue("value1")); DocumentSerializer buffer = DocumentSerializerFactory.create6(); update.serialize(buffer); buffer.getBuf().rewind(); try{ FileOutputStream fos = new FileOutputStream("src/test/files/addfieldser.dat"); fos.write(buffer.getBuf().array(), 0, buffer.getBuf().remaining()); fos.close(); } catch (Exception e) {} FieldUpdate deserializedUpdate = new FieldUpdate(DocumentDeserializerFactory.create6(new DocumentTypeManager(), buffer.getBuf()), documentType, Document.SERIALIZED_VERSION); assertEquals("'field1' [add value1 1]", deserializedUpdate.toString()); } @Test public void testClearSerialization() { FieldUpdate update = FieldUpdate.createClear(field); DocumentSerializer buffer = DocumentSerializerFactory.create6(); update.serialize(buffer); buffer.getBuf().rewind(); FieldUpdate deserializedUpdate = new FieldUpdate(DocumentDeserializerFactory.create6(new DocumentTypeManager(), buffer.getBuf()), documentType, Document.SERIALIZED_VERSION); assertEquals("'field1' [clear]", deserializedUpdate.toString()); } private static TensorFieldValue createTensor(TensorType type, String tensorCellString) { return new TensorFieldValue(Tensor.from(type, tensorCellString)); } private GrowableByteBuffer serializeUpdate(FieldUpdate update) { DocumentSerializer buffer = DocumentSerializerFactory.createHead(new GrowableByteBuffer()); update.serialize(buffer); buffer.getBuf().rewind(); 
return buffer.getBuf(); } private FieldUpdate deserializeUpdate(GrowableByteBuffer buffer) { return new FieldUpdate(DocumentDeserializerFactory.createHead(new DocumentTypeManager(), buffer), documentType, Document.SERIALIZED_VERSION); } @Test }
class SerializationTestCase { private DocumentType documentType; private Field field; private final static TensorType tensorType = new TensorType.Builder().mapped("x").mapped("y").build(); private Field tensorField; @Before public void setUp() { documentType = new DocumentType("document1"); field = new Field("field1", DataType.getArray(DataType.STRING)); documentType.addField(field); tensorField = new Field("tensorfield", new TensorDataType(tensorType)); documentType.addField(tensorField); } @Test public void testAddSerialization() { FieldUpdate update = FieldUpdate.createAdd(field, new StringFieldValue("value1")); DocumentSerializer buffer = DocumentSerializerFactory.create6(); update.serialize(buffer); buffer.getBuf().rewind(); try{ FileOutputStream fos = new FileOutputStream("src/test/files/addfieldser.dat"); fos.write(buffer.getBuf().array(), 0, buffer.getBuf().remaining()); fos.close(); } catch (Exception e) {} FieldUpdate deserializedUpdate = new FieldUpdate(DocumentDeserializerFactory.create6(new DocumentTypeManager(), buffer.getBuf()), documentType, Document.SERIALIZED_VERSION); assertEquals("'field1' [add value1 1]", deserializedUpdate.toString()); } @Test public void testClearSerialization() { FieldUpdate update = FieldUpdate.createClear(field); DocumentSerializer buffer = DocumentSerializerFactory.create6(); update.serialize(buffer); buffer.getBuf().rewind(); FieldUpdate deserializedUpdate = new FieldUpdate(DocumentDeserializerFactory.create6(new DocumentTypeManager(), buffer.getBuf()), documentType, Document.SERIALIZED_VERSION); assertEquals("'field1' [clear]", deserializedUpdate.toString()); } private static TensorFieldValue createTensor(TensorType type, String tensorCellString) { return new TensorFieldValue(Tensor.from(type, tensorCellString)); } private GrowableByteBuffer serializeUpdate(FieldUpdate update) { DocumentSerializer buffer = DocumentSerializerFactory.createHead(new GrowableByteBuffer()); update.serialize(buffer); buffer.getBuf().rewind(); 
return buffer.getBuf(); } private FieldUpdate deserializeUpdate(GrowableByteBuffer buffer) { return new FieldUpdate(DocumentDeserializerFactory.createHead(new DocumentTypeManager(), buffer), documentType, Document.SERIALIZED_VERSION); } @Test }
what is ISSUE A?
private void initColumns(ParamCreateContext context) throws UserException { TupleDescriptor srcTupleDesc = analyzer.getDescTbl().createTupleDescriptor(); context.tupleDescriptor = srcTupleDesc; Map<String, SlotDescriptor> slotDescByName = Maps.newHashMap(); context.slotDescByName = slotDescByName; TBrokerScanRangeParams params = context.params; List<ImportColumnDesc> originColumnNameToExprList = context.fileGroup.getColumnExprList(); if (originColumnNameToExprList == null || originColumnNameToExprList.isEmpty()) { for (Column column : targetTable.getBaseSchema()) { SlotDescriptor slotDesc = analyzer.getDescTbl().addSlotDescriptor(srcTupleDesc); slotDesc.setType(ScalarType.createType(PrimitiveType.VARCHAR)); slotDesc.setIsMaterialized(true); slotDesc.setIsNullable(true); slotDescByName.put(column.getName(), slotDesc); params.addToSrc_slot_ids(slotDesc.getId().asInt()); } params.setSrc_tuple_id(srcTupleDesc.getId().asInt()); return; } Map<String, Expr> columnNameToExpr = Maps.newHashMap(); context.exprMap = columnNameToExpr; for (ImportColumnDesc originColumnNameToExpr : originColumnNameToExprList) { String columnName = originColumnNameToExpr.getColumnName(); Expr columnExpr = originColumnNameToExpr.getExpr(); String realColName = targetTable.getColumn(columnName) == null ? 
columnName : targetTable.getColumn(columnName).getName(); if (columnExpr != null) { columnExpr = transformHadoopFunctionExpr(columnName, columnExpr); columnNameToExpr.put(realColName, columnExpr); } else { SlotDescriptor slotDesc = analyzer.getDescTbl().addSlotDescriptor(srcTupleDesc); slotDesc.setType(ScalarType.createType(PrimitiveType.VARCHAR)); slotDesc.setIsMaterialized(true); slotDesc.setIsNullable(true); params.addToSrc_slot_ids(slotDesc.getId().asInt()); slotDescByName.put(realColName, slotDesc); } } for (Map.Entry<String, Expr> entry : columnNameToExpr.entrySet()) { ExprSubstitutionMap smap = new ExprSubstitutionMap(); List<SlotRef> slots = Lists.newArrayList(); entry.getValue().collect(SlotRef.class, slots); for (SlotRef slot : slots) { SlotDescriptor slotDesc = slotDescByName.get(slot.getColumnName()); if (slotDesc == null) { throw new UserException("unknown reference column, column=" + entry.getKey() + ", reference=" + slot.getColumnName()); } smap.getLhs().add(slot); smap.getRhs().add(new SlotRef(slotDesc)); } Expr expr = entry.getValue().clone(smap); expr.analyze(analyzer); List<FunctionCallExpr> funcs = Lists.newArrayList(); expr.collect(FunctionCallExpr.class, funcs); for (FunctionCallExpr fn : funcs) { if (fn.isAggregateFunction()) { throw new AnalysisException("Don't support aggregation function in load expression"); } } columnNameToExpr.put(entry.getKey(), expr); } params.setSrc_tuple_id(srcTupleDesc.getId().asInt()); }
private void initColumns(ParamCreateContext context) throws UserException { TupleDescriptor srcTupleDesc = analyzer.getDescTbl().createTupleDescriptor(); context.tupleDescriptor = srcTupleDesc; Map<String, SlotDescriptor> slotDescByName = Maps.newHashMap(); context.slotDescByName = slotDescByName; TBrokerScanRangeParams params = context.params; List<ImportColumnDesc> originColumnNameToExprList = context.fileGroup.getColumnExprList(); if (originColumnNameToExprList == null || originColumnNameToExprList.isEmpty()) { for (Column column : targetTable.getBaseSchema()) { SlotDescriptor slotDesc = analyzer.getDescTbl().addSlotDescriptor(srcTupleDesc); slotDesc.setType(ScalarType.createType(PrimitiveType.VARCHAR)); slotDesc.setIsMaterialized(true); slotDesc.setIsNullable(true); slotDescByName.put(column.getName(), slotDesc); params.addToSrc_slot_ids(slotDesc.getId().asInt()); } params.setSrc_tuple_id(srcTupleDesc.getId().asInt()); return; } Map<String, Expr> columnNameToExpr = Maps.newHashMap(); context.exprMap = columnNameToExpr; for (ImportColumnDesc originColumnNameToExpr : originColumnNameToExprList) { String columnName = originColumnNameToExpr.getColumnName(); Expr columnExpr = originColumnNameToExpr.getExpr(); String realColName = targetTable.getColumn(columnName) == null ? 
columnName : targetTable.getColumn(columnName).getName(); if (columnExpr != null) { columnExpr = transformHadoopFunctionExpr(columnName, columnExpr); columnNameToExpr.put(realColName, columnExpr); } else { SlotDescriptor slotDesc = analyzer.getDescTbl().addSlotDescriptor(srcTupleDesc); slotDesc.setType(ScalarType.createType(PrimitiveType.VARCHAR)); slotDesc.setIsMaterialized(true); slotDesc.setIsNullable(true); params.addToSrc_slot_ids(slotDesc.getId().asInt()); slotDescByName.put(realColName, slotDesc); } } for (Map.Entry<String, Expr> entry : columnNameToExpr.entrySet()) { ExprSubstitutionMap smap = new ExprSubstitutionMap(); List<SlotRef> slots = Lists.newArrayList(); entry.getValue().collect(SlotRef.class, slots); for (SlotRef slot : slots) { SlotDescriptor slotDesc = slotDescByName.get(slot.getColumnName()); if (slotDesc == null) { throw new UserException("unknown reference column, column=" + entry.getKey() + ", reference=" + slot.getColumnName()); } smap.getLhs().add(slot); smap.getRhs().add(new SlotRef(slotDesc)); } Expr expr = entry.getValue().clone(smap); expr.analyze(analyzer); List<FunctionCallExpr> funcs = Lists.newArrayList(); expr.collect(FunctionCallExpr.class, funcs); for (FunctionCallExpr fn : funcs) { if (fn.isAggregateFunction()) { throw new AnalysisException("Don't support aggregation function in load expression"); } } columnNameToExpr.put(entry.getKey(), expr); } params.setSrc_tuple_id(srcTupleDesc.getId().asInt()); }
/**
 * Mutable holder for the per-file-group state built while planning a broker scan:
 * the file group being loaded, its thrift scan-range params, the source tuple
 * descriptor, and the column-name to expression / slot-descriptor mappings.
 * Populated by initColumns.
 */
class ParamCreateContext {
    public BrokerFileGroup fileGroup;                    // the file group this context describes
    public TBrokerScanRangeParams params;                // thrift params filled with slot/tuple ids
    public TupleDescriptor tupleDescriptor;              // source tuple (raw VARCHAR columns)
    public Map<String, Expr> exprMap;                    // column name -> analyzed load expression
    public Map<String, SlotDescriptor> slotDescByName;   // column name -> source slot descriptor
}
/**
 * Mutable holder for the per-file-group state built while planning a broker scan:
 * the file group being loaded, its thrift scan-range params, the source tuple
 * descriptor, and the column-name to expression / slot-descriptor mappings.
 * Populated by initColumns.
 */
class ParamCreateContext {
    public BrokerFileGroup fileGroup;                    // the file group this context describes
    public TBrokerScanRangeParams params;                // thrift params filled with slot/tuple ids
    public TupleDescriptor tupleDescriptor;              // source tuple (raw VARCHAR columns)
    public Map<String, Expr> exprMap;                    // column name -> analyzed load expression
    public Map<String, SlotDescriptor> slotDescByName;   // column name -> source slot descriptor
}
Why not test using `hostProvisioner.maintain()` directly?
/**
 * Given a full node population, only host2 (failed host) and host3 (provisioned host)
 * are selected for deprovisioning; active hosts, children and proxy hosts are not.
 */
public void finds_nodes_that_need_deprovisioning() {
    // The two hosts the maintainer is expected to act on, plus host2's child.
    Node failedHost = createNode("host2", Optional.empty(), NodeType.host, Node.State.failed, Optional.of(tenantApp));
    Node failedHostChild = createNode("host2-1", Optional.of("host2"), NodeType.tenant, Node.State.failed, Optional.empty());
    Node provisionedHost = createNode("host3", Optional.empty(), NodeType.host, Node.State.provisioned, Optional.empty());

    // Surrounding population that must NOT be selected.
    List<Node> allNodes = List.of(
            createNode("host1", Optional.empty(), NodeType.host, Node.State.active, Optional.of(tenantHostApp)),
            createNode("host1-1", Optional.of("host1"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)),
            createNode("host1-2", Optional.of("host1"), NodeType.tenant, Node.State.failed, Optional.empty()),
            failedHost, failedHostChild, provisionedHost,
            createNode("host4", Optional.empty(), NodeType.host, Node.State.provisioned, Optional.empty()),
            createNode("host4-1", Optional.of("host4"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)),
            createNode("proxyhost1", Optional.empty(), NodeType.proxyhost, Node.State.provisioned, Optional.empty()),
            createNode("proxyhost2", Optional.empty(), NodeType.proxyhost, Node.State.active, Optional.of(proxyHostApp)),
            createNode("proxy2", Optional.of("proxyhost2"), NodeType.proxy, Node.State.active, Optional.of(proxyApp)));

    assertEquals(Set.of(failedHost, provisionedHost),
                 HostDeprovisionMaintainer.actionableNodes(new NodeList(allNodes)));
}
Set<Node> actual = HostDeprovisionMaintainer.actionableNodes(new NodeList(nodes));
/**
 * Given a full node population, only host2 (failed host) and host3 (provisioned host)
 * are selected as deprovisioning candidates; active hosts, children and proxy hosts are not.
 */
public void finds_nodes_that_need_deprovisioning() {
    // The two hosts the maintainer is expected to act on, plus host2's child.
    Node failedHost = createNode("host2", Optional.empty(), NodeType.host, Node.State.failed, Optional.of(tenantApp));
    Node failedHostChild = createNode("host2-1", Optional.of("host2"), NodeType.tenant, Node.State.failed, Optional.empty());
    Node provisionedHost = createNode("host3", Optional.empty(), NodeType.host, Node.State.provisioned, Optional.empty());

    // Surrounding population that must NOT be selected.
    List<Node> allNodes = List.of(
            createNode("host1", Optional.empty(), NodeType.host, Node.State.active, Optional.of(tenantHostApp)),
            createNode("host1-1", Optional.of("host1"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)),
            createNode("host1-2", Optional.of("host1"), NodeType.tenant, Node.State.failed, Optional.empty()),
            failedHost, failedHostChild, provisionedHost,
            createNode("host4", Optional.empty(), NodeType.host, Node.State.provisioned, Optional.empty()),
            createNode("host4-1", Optional.of("host4"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)),
            createNode("proxyhost1", Optional.empty(), NodeType.proxyhost, Node.State.provisioned, Optional.empty()),
            createNode("proxyhost2", Optional.empty(), NodeType.proxyhost, Node.State.active, Optional.of(proxyHostApp)),
            createNode("proxy2", Optional.of("proxyhost2"), NodeType.proxy, Node.State.active, Optional.of(proxyApp)));

    assertEquals(Set.of(failedHost, provisionedHost),
                 HostDeprovisionMaintainer.candidates(new NodeList(allNodes)));
}
class HostDeprovisionMaintainerTest { private final HostProvisionerTester tester = new HostProvisionerTester(); private final HostProvisioner hostProvisioner = mock(HostProvisioner.class); private final HostDeprovisionMaintainer maintainer = new HostDeprovisionMaintainer( tester.nodeRepository(), Duration.ofDays(1), tester.jobControl(), hostProvisioner); @Test public void removes_nodes_if_successful() { tester.addNode("host1", Optional.empty(), NodeType.host, Node.State.active, Optional.of(tenantHostApp)); tester.addNode("host1-1", Optional.of("host1"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)); tester.addNode("host1-2", Optional.of("host1"), NodeType.tenant, Node.State.failed, Optional.empty()); tester.addNode("host4", Optional.empty(), NodeType.host, Node.State.provisioned, Optional.empty()); tester.addNode("host4-1", Optional.of("host4"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)); Node host2 = tester.addNode("host2", Optional.empty(), NodeType.host, Node.State.failed, Optional.of(tenantApp)); tester.addNode("host2-1", Optional.of("host2"), NodeType.tenant, Node.State.failed, Optional.empty()); assertEquals(7, tester.nodeRepository().getNodes().size()); maintainer.maintain(); assertEquals(5, tester.nodeRepository().getNodes().size()); verify(hostProvisioner).deprovision(eq(host2)); verifyNoMoreInteractions(hostProvisioner); assertTrue(tester.nodeRepository().getNode("host2").isEmpty()); assertTrue(tester.nodeRepository().getNode("host2-1").isEmpty()); } @Test public void does_not_remove_if_failed() { Node host2 = tester.addNode("host2", Optional.empty(), NodeType.host, Node.State.failed, Optional.of(tenantApp)); doThrow(new RuntimeException()).when(hostProvisioner).deprovision(eq(host2)); maintainer.maintain(); assertEquals(1, tester.nodeRepository().getNodes().size()); verify(hostProvisioner).deprovision(eq(host2)); verifyNoMoreInteractions(hostProvisioner); } @Test }
class HostDeprovisionMaintainerTest { private final HostProvisionerTester tester = new HostProvisionerTester(); private final HostProvisioner hostProvisioner = mock(HostProvisioner.class); private final HostDeprovisionMaintainer maintainer = new HostDeprovisionMaintainer( tester.nodeRepository(), Duration.ofDays(1), tester.jobControl(), hostProvisioner); @Test public void removes_nodes_if_successful() { tester.addNode("host1", Optional.empty(), NodeType.host, Node.State.active, Optional.of(tenantHostApp)); tester.addNode("host1-1", Optional.of("host1"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)); tester.addNode("host1-2", Optional.of("host1"), NodeType.tenant, Node.State.failed, Optional.empty()); tester.addNode("host4", Optional.empty(), NodeType.host, Node.State.provisioned, Optional.empty()); tester.addNode("host4-1", Optional.of("host4"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)); Node host2 = tester.addNode("host2", Optional.empty(), NodeType.host, Node.State.failed, Optional.of(tenantApp)); tester.addNode("host2-1", Optional.of("host2"), NodeType.tenant, Node.State.failed, Optional.empty()); assertEquals(7, tester.nodeRepository().getNodes().size()); maintainer.maintain(); assertEquals(5, tester.nodeRepository().getNodes().size()); verify(hostProvisioner).deprovision(eq(host2)); verifyNoMoreInteractions(hostProvisioner); assertTrue(tester.nodeRepository().getNode("host2").isEmpty()); assertTrue(tester.nodeRepository().getNode("host2-1").isEmpty()); } @Test public void does_not_remove_if_failed() { Node host2 = tester.addNode("host2", Optional.empty(), NodeType.host, Node.State.failed, Optional.of(tenantApp)); doThrow(new RuntimeException()).when(hostProvisioner).deprovision(eq(host2)); maintainer.maintain(); assertEquals(1, tester.nodeRepository().getNodes().size()); verify(hostProvisioner).deprovision(eq(host2)); verifyNoMoreInteractions(hostProvisioner); } @Test }
The purpose of this test is to verify that given a set of all nodes, the correct ones are picked for processing. It is much easier (don't have to set up mock) and more correct (verifying the method directly, rather than observing which external methods were invoked) to call this directly.
/**
 * Given a full node population, only host2 (failed host) and host3 (provisioned host)
 * are selected for deprovisioning; active hosts, children and proxy hosts are not.
 */
public void finds_nodes_that_need_deprovisioning() {
    // The two hosts the maintainer is expected to act on, plus host2's child.
    Node failedHost = createNode("host2", Optional.empty(), NodeType.host, Node.State.failed, Optional.of(tenantApp));
    Node failedHostChild = createNode("host2-1", Optional.of("host2"), NodeType.tenant, Node.State.failed, Optional.empty());
    Node provisionedHost = createNode("host3", Optional.empty(), NodeType.host, Node.State.provisioned, Optional.empty());

    // Surrounding population that must NOT be selected.
    List<Node> allNodes = List.of(
            createNode("host1", Optional.empty(), NodeType.host, Node.State.active, Optional.of(tenantHostApp)),
            createNode("host1-1", Optional.of("host1"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)),
            createNode("host1-2", Optional.of("host1"), NodeType.tenant, Node.State.failed, Optional.empty()),
            failedHost, failedHostChild, provisionedHost,
            createNode("host4", Optional.empty(), NodeType.host, Node.State.provisioned, Optional.empty()),
            createNode("host4-1", Optional.of("host4"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)),
            createNode("proxyhost1", Optional.empty(), NodeType.proxyhost, Node.State.provisioned, Optional.empty()),
            createNode("proxyhost2", Optional.empty(), NodeType.proxyhost, Node.State.active, Optional.of(proxyHostApp)),
            createNode("proxy2", Optional.of("proxyhost2"), NodeType.proxy, Node.State.active, Optional.of(proxyApp)));

    assertEquals(Set.of(failedHost, provisionedHost),
                 HostDeprovisionMaintainer.actionableNodes(new NodeList(allNodes)));
}
Set<Node> actual = HostDeprovisionMaintainer.actionableNodes(new NodeList(nodes));
/**
 * Given a full node population, only host2 (failed host) and host3 (provisioned host)
 * are selected as deprovisioning candidates; active hosts, children and proxy hosts are not.
 */
public void finds_nodes_that_need_deprovisioning() {
    // The two hosts the maintainer is expected to act on, plus host2's child.
    Node failedHost = createNode("host2", Optional.empty(), NodeType.host, Node.State.failed, Optional.of(tenantApp));
    Node failedHostChild = createNode("host2-1", Optional.of("host2"), NodeType.tenant, Node.State.failed, Optional.empty());
    Node provisionedHost = createNode("host3", Optional.empty(), NodeType.host, Node.State.provisioned, Optional.empty());

    // Surrounding population that must NOT be selected.
    List<Node> allNodes = List.of(
            createNode("host1", Optional.empty(), NodeType.host, Node.State.active, Optional.of(tenantHostApp)),
            createNode("host1-1", Optional.of("host1"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)),
            createNode("host1-2", Optional.of("host1"), NodeType.tenant, Node.State.failed, Optional.empty()),
            failedHost, failedHostChild, provisionedHost,
            createNode("host4", Optional.empty(), NodeType.host, Node.State.provisioned, Optional.empty()),
            createNode("host4-1", Optional.of("host4"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)),
            createNode("proxyhost1", Optional.empty(), NodeType.proxyhost, Node.State.provisioned, Optional.empty()),
            createNode("proxyhost2", Optional.empty(), NodeType.proxyhost, Node.State.active, Optional.of(proxyHostApp)),
            createNode("proxy2", Optional.of("proxyhost2"), NodeType.proxy, Node.State.active, Optional.of(proxyApp)));

    assertEquals(Set.of(failedHost, provisionedHost),
                 HostDeprovisionMaintainer.candidates(new NodeList(allNodes)));
}
class HostDeprovisionMaintainerTest { private final HostProvisionerTester tester = new HostProvisionerTester(); private final HostProvisioner hostProvisioner = mock(HostProvisioner.class); private final HostDeprovisionMaintainer maintainer = new HostDeprovisionMaintainer( tester.nodeRepository(), Duration.ofDays(1), tester.jobControl(), hostProvisioner); @Test public void removes_nodes_if_successful() { tester.addNode("host1", Optional.empty(), NodeType.host, Node.State.active, Optional.of(tenantHostApp)); tester.addNode("host1-1", Optional.of("host1"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)); tester.addNode("host1-2", Optional.of("host1"), NodeType.tenant, Node.State.failed, Optional.empty()); tester.addNode("host4", Optional.empty(), NodeType.host, Node.State.provisioned, Optional.empty()); tester.addNode("host4-1", Optional.of("host4"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)); Node host2 = tester.addNode("host2", Optional.empty(), NodeType.host, Node.State.failed, Optional.of(tenantApp)); tester.addNode("host2-1", Optional.of("host2"), NodeType.tenant, Node.State.failed, Optional.empty()); assertEquals(7, tester.nodeRepository().getNodes().size()); maintainer.maintain(); assertEquals(5, tester.nodeRepository().getNodes().size()); verify(hostProvisioner).deprovision(eq(host2)); verifyNoMoreInteractions(hostProvisioner); assertTrue(tester.nodeRepository().getNode("host2").isEmpty()); assertTrue(tester.nodeRepository().getNode("host2-1").isEmpty()); } @Test public void does_not_remove_if_failed() { Node host2 = tester.addNode("host2", Optional.empty(), NodeType.host, Node.State.failed, Optional.of(tenantApp)); doThrow(new RuntimeException()).when(hostProvisioner).deprovision(eq(host2)); maintainer.maintain(); assertEquals(1, tester.nodeRepository().getNodes().size()); verify(hostProvisioner).deprovision(eq(host2)); verifyNoMoreInteractions(hostProvisioner); } @Test }
class HostDeprovisionMaintainerTest { private final HostProvisionerTester tester = new HostProvisionerTester(); private final HostProvisioner hostProvisioner = mock(HostProvisioner.class); private final HostDeprovisionMaintainer maintainer = new HostDeprovisionMaintainer( tester.nodeRepository(), Duration.ofDays(1), tester.jobControl(), hostProvisioner); @Test public void removes_nodes_if_successful() { tester.addNode("host1", Optional.empty(), NodeType.host, Node.State.active, Optional.of(tenantHostApp)); tester.addNode("host1-1", Optional.of("host1"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)); tester.addNode("host1-2", Optional.of("host1"), NodeType.tenant, Node.State.failed, Optional.empty()); tester.addNode("host4", Optional.empty(), NodeType.host, Node.State.provisioned, Optional.empty()); tester.addNode("host4-1", Optional.of("host4"), NodeType.tenant, Node.State.reserved, Optional.of(tenantApp)); Node host2 = tester.addNode("host2", Optional.empty(), NodeType.host, Node.State.failed, Optional.of(tenantApp)); tester.addNode("host2-1", Optional.of("host2"), NodeType.tenant, Node.State.failed, Optional.empty()); assertEquals(7, tester.nodeRepository().getNodes().size()); maintainer.maintain(); assertEquals(5, tester.nodeRepository().getNodes().size()); verify(hostProvisioner).deprovision(eq(host2)); verifyNoMoreInteractions(hostProvisioner); assertTrue(tester.nodeRepository().getNode("host2").isEmpty()); assertTrue(tester.nodeRepository().getNode("host2-1").isEmpty()); } @Test public void does_not_remove_if_failed() { Node host2 = tester.addNode("host2", Optional.empty(), NodeType.host, Node.State.failed, Optional.of(tenantApp)); doThrow(new RuntimeException()).when(hostProvisioner).deprovision(eq(host2)); maintainer.maintain(); assertEquals(1, tester.nodeRepository().getNodes().size()); verify(hostProvisioner).deprovision(eq(host2)); verifyNoMoreInteractions(hostProvisioner); } @Test }
This must be synchronized
/**
 * Removes the metric entry for the given dimensions under the given type and name.
 * No-op when the type, name or dimensions are not present.
 *
 * Fixes two defects in the previous version: it accessed the shared {@code metrics}
 * map without holding {@code monitor} (every other accessor in this class
 * synchronizes on it), and it used an empty {@code catch (NullPointerException)}
 * as control flow, which would also hide genuine NPEs.
 */
public void deleteMetricByDimension(String name, Dimensions dimensionsToRemove, DimensionType type) {
    synchronized (monitor) {
        Map<String, ApplicationMetrics> metricsByName = metrics.get(type);
        if (metricsByName == null) return;           // nothing recorded for this type
        ApplicationMetrics applicationMetrics = metricsByName.get(name);
        if (applicationMetrics == null) return;      // nothing recorded under this name
        applicationMetrics.metricsByDimensions().remove(dimensionsToRemove);
    }
}
try{
/**
 * Removes the metric entry for the given dimensions under the given type and name,
 * while holding {@code monitor}. No-op when the type, name or dimensions are absent.
 */
public void deleteMetricByDimension(String name, Dimensions dimensionsToRemove, DimensionType type) {
    synchronized (monitor) {
        // Explicit null checks instead of an Optional chain: same outcome, each
        // missing level simply means there is nothing to delete.
        Map<String, ApplicationMetrics> metricsByName = metrics.get(type);
        if (metricsByName == null) return;
        ApplicationMetrics applicationMetrics = metricsByName.get(name);
        if (applicationMetrics == null) return;
        applicationMetrics.metricsByDimensions().remove(dimensionsToRemove);
    }
}
class MetricReceiverWrapper { public static final String APPLICATION_DOCKER = "docker"; public static final String APPLICATION_HOST = "vespa.host"; public static final String APPLICATION_NODE = "vespa.node"; private final Object monitor = new Object(); private final Map<DimensionType, Map<String, ApplicationMetrics>> metrics = new HashMap<>(); private final MetricReceiver metricReceiver; @Inject public MetricReceiverWrapper(MetricReceiver metricReceiver) { this.metricReceiver = metricReceiver; } /** * Declaring the same dimensions and name results in the same CounterWrapper instance (idempotent). */ public CounterWrapper declareCounter(String application, Dimensions dimensions, String name) { return declareCounter(application, dimensions, name, DimensionType.DEFAULT); } public CounterWrapper declareCounter(String application, Dimensions dimensions, String name, DimensionType type) { synchronized (monitor) { Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, type); if (!metricsByDimensions.containsKey(dimensions)) metricsByDimensions.put(dimensions, new HashMap<>()); if (!metricsByDimensions.get(dimensions).containsKey(name)) { CounterWrapper counter = new CounterWrapper(metricReceiver.declareCounter(name, new Point(dimensions.dimensionsMap))); metricsByDimensions.get(dimensions).put(name, counter); } return (CounterWrapper) metricsByDimensions.get(dimensions).get(name); } } /** * Declaring the same dimensions and name results in the same GaugeWrapper instance (idempotent). 
*/ public GaugeWrapper declareGauge(String application, Dimensions dimensions, String name) { return declareGauge(application, dimensions, name, DimensionType.DEFAULT); } public GaugeWrapper declareGauge(String application, Dimensions dimensions, String name, DimensionType type) { synchronized (monitor) { Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, type); if (!metricsByDimensions.containsKey(dimensions)) metricsByDimensions.put(dimensions, new HashMap<>()); if (!metricsByDimensions.get(dimensions).containsKey(name)) { GaugeWrapper gauge = new GaugeWrapper(metricReceiver.declareGauge(name, new Point(dimensions.dimensionsMap))); metricsByDimensions.get(dimensions).put(name, gauge); } return (GaugeWrapper) metricsByDimensions.get(dimensions).get(name); } } public List<DimensionMetrics> getDefaultMetrics() { return getMetricsByType(DimensionType.DEFAULT); } public Set<Map<String, Object>> getDefaultMetricsRaw() { synchronized (monitor) { Set<Map<String, Object>> dimensionMetrics = new HashSet<>(); metrics.getOrDefault(DimensionType.DEFAULT, new HashMap<>()) .forEach((application, applicationMetrics) -> applicationMetrics.metricsByDimensions().entrySet().stream() .map(entry -> new DimensionMetrics(application, entry.getKey(), entry.getValue().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, value -> value.getValue().getValue())))) .map(DimensionMetrics::getMetrics) .forEach(dimensionMetrics::add)); return dimensionMetrics; } } public List<DimensionMetrics> getMetricsByType(DimensionType type) { synchronized (monitor) { List<DimensionMetrics> dimensionMetrics = new ArrayList<>(); metrics.getOrDefault(type, new HashMap<>()) .forEach((application, applicationMetrics) -> applicationMetrics.metricsByDimensions().entrySet().stream() .map(entry -> new DimensionMetrics(application, entry.getKey(), entry.getValue().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, value -> 
value.getValue().getValue())))) .forEach(dimensionMetrics::add)); return dimensionMetrics; } } Map<String, Number> getMetricsForDimension(String application, Dimensions dimensions) { synchronized (monitor) { Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, DimensionType.DEFAULT); try { return metricsByDimensions.get(dimensions).entrySet().stream().collect( Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getValue())); } catch (NullPointerException e) { return new HashMap<>(); } } } private Map<Dimensions, Map<String, MetricValue>> getOrCreateApplicationMetrics(String application, DimensionType type) { Map<String, ApplicationMetrics> applicationMetrics = metrics.computeIfAbsent(type, m -> new HashMap<>()); if (! applicationMetrics.containsKey(application)) { ApplicationMetrics metrics = new ApplicationMetrics(); applicationMetrics.put(application, metrics); } return applicationMetrics.get(application).metricsByDimensions(); } private static class ApplicationMetrics { private final Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = new LinkedHashMap<>(); Map<Dimensions, Map<String, MetricValue>> metricsByDimensions() { return metricsByDimensions; } } public enum DimensionType {DEFAULT, PRETAGGED} }
/**
 * Thread-safe wrapper around a {@link MetricReceiver} that keeps its own view of all
 * declared counters and gauges, keyed by dimension type, application, dimensions and
 * metric name, so they can be read back and serialized.
 *
 * All access to {@code metrics} is guarded by {@code monitor}.
 */
class MetricReceiverWrapper {
    public static final String APPLICATION_DOCKER = "docker";
    public static final String APPLICATION_HOST = "vespa.host";
    public static final String APPLICATION_NODE = "vespa.node";

    private final Object monitor = new Object();
    // dimension type -> application -> metrics for that application
    private final Map<DimensionType, Map<String, ApplicationMetrics>> metrics = new HashMap<>();
    private final MetricReceiver metricReceiver;

    @Inject
    public MetricReceiverWrapper(MetricReceiver metricReceiver) {
        this.metricReceiver = metricReceiver;
    }

    /**
     * Declaring the same dimensions and name results in the same CounterWrapper instance (idempotent).
     */
    public CounterWrapper declareCounter(String application, Dimensions dimensions, String name) {
        return declareCounter(application, dimensions, name, DimensionType.DEFAULT);
    }

    public CounterWrapper declareCounter(String application, Dimensions dimensions, String name, DimensionType type) {
        synchronized (monitor) {
            Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, type);
            if (!metricsByDimensions.containsKey(dimensions)) metricsByDimensions.put(dimensions, new HashMap<>());
            if (!metricsByDimensions.get(dimensions).containsKey(name)) {
                CounterWrapper counter = new CounterWrapper(metricReceiver.declareCounter(name, new Point(dimensions.dimensionsMap)));
                metricsByDimensions.get(dimensions).put(name, counter);
            }
            return (CounterWrapper) metricsByDimensions.get(dimensions).get(name);
        }
    }

    /**
     * Declaring the same dimensions and name results in the same GaugeWrapper instance (idempotent).
     */
    public GaugeWrapper declareGauge(String application, Dimensions dimensions, String name) {
        return declareGauge(application, dimensions, name, DimensionType.DEFAULT);
    }

    public GaugeWrapper declareGauge(String application, Dimensions dimensions, String name, DimensionType type) {
        synchronized (monitor) {
            Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, type);
            if (!metricsByDimensions.containsKey(dimensions)) metricsByDimensions.put(dimensions, new HashMap<>());
            if (!metricsByDimensions.get(dimensions).containsKey(name)) {
                GaugeWrapper gauge = new GaugeWrapper(metricReceiver.declareGauge(name, new Point(dimensions.dimensionsMap)));
                metricsByDimensions.get(dimensions).put(name, gauge);
            }
            return (GaugeWrapper) metricsByDimensions.get(dimensions).get(name);
        }
    }

    /** Returns all metrics of the DEFAULT dimension type. */
    public List<DimensionMetrics> getDefaultMetrics() {
        return getMetricsByType(DimensionType.DEFAULT);
    }

    /** Returns the DEFAULT-type metrics as raw maps (see DimensionMetrics.getMetrics). */
    public Set<Map<String, Object>> getDefaultMetricsRaw() {
        synchronized (monitor) {
            Set<Map<String, Object>> dimensionMetrics = new HashSet<>();
            metrics.getOrDefault(DimensionType.DEFAULT, new HashMap<>())
                    .forEach((application, applicationMetrics) -> applicationMetrics.metricsByDimensions().entrySet().stream()
                            .map(entry -> new DimensionMetrics(application, entry.getKey(),
                                    entry.getValue().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey,
                                            value -> value.getValue().getValue()))))
                            .map(DimensionMetrics::getMetrics)
                            .forEach(dimensionMetrics::add));
            return dimensionMetrics;
        }
    }

    /** Returns a snapshot of all metrics of the given dimension type. */
    public List<DimensionMetrics> getMetricsByType(DimensionType type) {
        synchronized (monitor) {
            List<DimensionMetrics> dimensionMetrics = new ArrayList<>();
            metrics.getOrDefault(type, new HashMap<>())
                    .forEach((application, applicationMetrics) -> applicationMetrics.metricsByDimensions().entrySet().stream()
                            .map(entry -> new DimensionMetrics(application, entry.getKey(),
                                    entry.getValue().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey,
                                            value -> value.getValue().getValue()))))
                            .forEach(dimensionMetrics::add));
            return dimensionMetrics;
        }
    }

    /** Returns current values for the given application/dimensions, or an empty map if none. */
    Map<String, Number> getMetricsForDimension(String application, Dimensions dimensions) {
        synchronized (monitor) {
            Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, DimensionType.DEFAULT);
            // Missing dimensions entry yields an empty result rather than an NPE.
            return metricsByDimensions.getOrDefault(dimensions, Collections.emptyMap())
                    .entrySet()
                    .stream()
                    .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getValue()));
        }
    }

    // Lazily creates the nested per-type / per-application entries. Callers must hold 'monitor'.
    private Map<Dimensions, Map<String, MetricValue>> getOrCreateApplicationMetrics(String application, DimensionType type) {
        Map<String, ApplicationMetrics> applicationMetrics = metrics.computeIfAbsent(type, m -> new HashMap<>());
        if (! applicationMetrics.containsKey(application)) {
            ApplicationMetrics metrics = new ApplicationMetrics();
            applicationMetrics.put(application, metrics);
        }
        return applicationMetrics.get(application).metricsByDimensions();
    }

    // Per-application container; LinkedHashMap preserves declaration order of dimensions.
    private static class ApplicationMetrics {
        private final Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = new LinkedHashMap<>();

        Map<Dimensions, Map<String, MetricValue>> metricsByDimensions() {
            return metricsByDimensions;
        }
    }

    public enum DimensionType {DEFAULT, PRETAGGED}
}
This Properties implementation can probably be replaced by TestProperties...
/**
 * Returns deploy properties for model testing.
 *
 * Replaces a hand-rolled anonymous {@code Properties} implementation (non-multitenant,
 * default application id, empty config servers/rotations, default zone, all feature
 * booleans at their defaults) with the shared {@code TestProperties} test fixture,
 * which provides those same defaults in one place.
 * NOTE(review): assumes TestProperties' defaults match the removed anonymous class
 * (in particular useFdispatchByDefault) — confirm against TestProperties.
 */
public Properties properties() {
    return new TestProperties();
}
public boolean useAdaptiveDispatch() { return false; }
/** Returns deploy properties for model testing, using the shared TestProperties defaults. */
public Properties properties() { return new TestProperties(); }
class MockModelContext implements ModelContext { private final ApplicationPackage applicationPackage; public MockModelContext() { this.applicationPackage = MockApplicationPackage.createEmpty(); } public MockModelContext(ApplicationPackage applicationPackage) { this.applicationPackage = applicationPackage; } @Override public ApplicationPackage applicationPackage() { return applicationPackage; } @Override public Optional<Model> previousModel() { return Optional.empty(); } @Override public Optional<ApplicationPackage> permanentApplicationPackage() { return Optional.empty(); } @Override public Optional<HostProvisioner> hostProvisioner() { return Optional.empty(); } @Override public DeployLogger deployLogger() { return new BaseDeployLogger(); } @Override public ConfigDefinitionRepo configDefinitionRepo() { return new StaticConfigDefinitionRepo(); } @Override public FileRegistry getFileRegistry() { return new MockFileRegistry(); } @Override public Version modelVespaVersion() { return new Version(6); } @Override public Version wantedNodeVespaVersion() { return new Version(6); } @Override }
class MockModelContext implements ModelContext { private final ApplicationPackage applicationPackage; public MockModelContext() { this.applicationPackage = MockApplicationPackage.createEmpty(); } public MockModelContext(ApplicationPackage applicationPackage) { this.applicationPackage = applicationPackage; } @Override public ApplicationPackage applicationPackage() { return applicationPackage; } @Override public Optional<Model> previousModel() { return Optional.empty(); } @Override public Optional<ApplicationPackage> permanentApplicationPackage() { return Optional.empty(); } @Override public Optional<HostProvisioner> hostProvisioner() { return Optional.empty(); } @Override public DeployLogger deployLogger() { return new BaseDeployLogger(); } @Override public ConfigDefinitionRepo configDefinitionRepo() { return new StaticConfigDefinitionRepo(); } @Override public FileRegistry getFileRegistry() { return new MockFileRegistry(); } @Override public Version modelVespaVersion() { return new Version(6); } @Override public Version wantedNodeVespaVersion() { return new Version(6); } @Override }
Done
/**
 * Returns deploy properties for model testing.
 *
 * Replaces a hand-rolled anonymous {@code Properties} implementation (non-multitenant,
 * default application id, empty config servers/rotations, default zone, all feature
 * booleans at their defaults) with the shared {@code TestProperties} test fixture,
 * which provides those same defaults in one place.
 * NOTE(review): assumes TestProperties' defaults match the removed anonymous class
 * (in particular useFdispatchByDefault) — confirm against TestProperties.
 */
public Properties properties() {
    return new TestProperties();
}
public boolean useAdaptiveDispatch() { return false; }
/** Returns deploy properties for model testing, using the shared TestProperties defaults. */
public Properties properties() { return new TestProperties(); }
class MockModelContext implements ModelContext { private final ApplicationPackage applicationPackage; public MockModelContext() { this.applicationPackage = MockApplicationPackage.createEmpty(); } public MockModelContext(ApplicationPackage applicationPackage) { this.applicationPackage = applicationPackage; } @Override public ApplicationPackage applicationPackage() { return applicationPackage; } @Override public Optional<Model> previousModel() { return Optional.empty(); } @Override public Optional<ApplicationPackage> permanentApplicationPackage() { return Optional.empty(); } @Override public Optional<HostProvisioner> hostProvisioner() { return Optional.empty(); } @Override public DeployLogger deployLogger() { return new BaseDeployLogger(); } @Override public ConfigDefinitionRepo configDefinitionRepo() { return new StaticConfigDefinitionRepo(); } @Override public FileRegistry getFileRegistry() { return new MockFileRegistry(); } @Override public Version modelVespaVersion() { return new Version(6); } @Override public Version wantedNodeVespaVersion() { return new Version(6); } @Override }
class MockModelContext implements ModelContext { private final ApplicationPackage applicationPackage; public MockModelContext() { this.applicationPackage = MockApplicationPackage.createEmpty(); } public MockModelContext(ApplicationPackage applicationPackage) { this.applicationPackage = applicationPackage; } @Override public ApplicationPackage applicationPackage() { return applicationPackage; } @Override public Optional<Model> previousModel() { return Optional.empty(); } @Override public Optional<ApplicationPackage> permanentApplicationPackage() { return Optional.empty(); } @Override public Optional<HostProvisioner> hostProvisioner() { return Optional.empty(); } @Override public DeployLogger deployLogger() { return new BaseDeployLogger(); } @Override public ConfigDefinitionRepo configDefinitionRepo() { return new StaticConfigDefinitionRepo(); } @Override public FileRegistry getFileRegistry() { return new MockFileRegistry(); } @Override public Version modelVespaVersion() { return new Version(6); } @Override public Version wantedNodeVespaVersion() { return new Version(6); } @Override }
one might think that a valid cache (`getValidCache`) would mean it contains all applications, which it does not and may be counter-intuitive. This method always appears in conjunction with `computeIfAbsent`, perhaps combine them?
/**
 * Returns ALLOWED_TO_BE_DOWN if the host is in the application's suspended set,
 * NO_REMARKS otherwise. The set is fetched from the cache, which is repopulated
 * via hostsDownFor when the application's entry is absent.
 */
public HostStatus getHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName) {
    Set<HostName> hostsAllowedDown =
            getValidCache().computeIfAbsent(applicationInstanceReference, this::hostsDownFor);
    if (hostsAllowedDown.contains(hostName)) {
        return HostStatus.ALLOWED_TO_BE_DOWN;
    }
    return HostStatus.NO_REMARKS;
}
return getValidCache().computeIfAbsent(applicationInstanceReference, this::hostsDownFor)
/**
 * Returns ALLOWED_TO_BE_DOWN if the host is in the application's suspended set,
 * NO_REMARKS otherwise. The set is fetched from the cache, which is repopulated
 * via hostsDownFor when the application's entry is absent.
 */
public HostStatus getHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName) {
    Set<HostName> hostsAllowedDown =
            getValidCache().computeIfAbsent(applicationInstanceReference, this::hostsDownFor);
    if (hostsAllowedDown.contains(hostName)) {
        return HostStatus.ALLOWED_TO_BE_DOWN;
    }
    return HostStatus.NO_REMARKS;
}
class ZookeeperStatusService implements StatusService { private static final Logger log = Logger.getLogger(ZookeeperStatusService.class.getName()); final static String HOST_STATUS_BASE_PATH = "/vespa/host-status-service"; final static String APPLICATION_STATUS_BASE_PATH = "/vespa/application-status-service"; final static String COUNTER_PATH = "/vespa/cache-counter"; private final Curator curator; private final CuratorCounter counter; /** A cache of hosts allowed to be down. Access only through {@link private final Map<ApplicationInstanceReference, Set<HostName>> hostsDown; private volatile long cacheRefreshedAt; @Inject public ZookeeperStatusService(@Component Curator curator) { this.curator = curator; this.counter = new CuratorCounter(curator, COUNTER_PATH); this.cacheRefreshedAt = counter.get(); this.hostsDown = new ConcurrentHashMap<>(); } @Override public Set<ApplicationInstanceReference> getAllSuspendedApplications() { try { Set<ApplicationInstanceReference> resultSet = new HashSet<>(); Stat stat = curator.framework().checkExists().forPath(APPLICATION_STATUS_BASE_PATH); if (stat == null) return resultSet; for (String appRefStr : curator.framework().getChildren().forPath(APPLICATION_STATUS_BASE_PATH)) { ApplicationInstanceReference appRef = OrchestratorUtil.parseAppInstanceReference(appRefStr); resultSet.add(appRef); } return resultSet; } catch (Exception e) { log.log(LogLevel.DEBUG, "Something went wrong while listing out applications in suspend.", e); throw new RuntimeException(e); } } /** * Cache is checked for freshness when this mapping is created, and may be invalidated again later * by other users of the cache. Since this function is backed by the cache, any such invalidations * will be reflected in the returned mapping; all users of the cache collaborate in repopulating it. 
*/ @Override public Function<ApplicationInstanceReference, Set<HostName>> getSuspendedHostsByApplication() { Map<ApplicationInstanceReference, Set<HostName>> suspendedHostsByApplication = getValidCache(); return application -> suspendedHostsByApplication.computeIfAbsent(application, this::hostsDownFor); } /** * 1) locks the status service for an application instance. * 2) fails all operations in this thread when the session is lost, * since session loss might cause the lock to be lost. * Since it only fails operations in this thread, * all operations depending on a lock, including the locking itself, must be done in this thread. * Note that since it is the thread that fails, all status operations in this thread will fail * even if they're not supposed to be guarded by this lock * (i.e. the request is for another applicationInstanceReference) */ @Override public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( OrchestratorContext context, ApplicationInstanceReference applicationInstanceReference) { Duration duration = context.getTimeLeft(); String lockPath = applicationInstanceLock2Path(applicationInstanceReference); Lock lock = new Lock(lockPath, curator); lock.acquire(duration); try { return new ZkMutableStatusRegistry(lock, applicationInstanceReference, context.isProbe()); } catch (Throwable t) { lock.close(); throw t; } } private void setHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName, HostStatus status) { String path = hostAllowedDownPath(applicationInstanceReference, hostName); boolean invalidate = false; try { switch (status) { case NO_REMARKS: invalidate = deleteNode_ignoreNoNodeException(path, "Host already has state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: invalidate = createNode_ignoreNodeExistsException(path, "Host already has state ALLOWED_TO_BE_DOWN, path = " + path); break; default: throw new IllegalArgumentException("Unexpected status '" + status + "'."); } } catch 
(Exception e) { invalidate = true; throw new RuntimeException(e); } finally { if (invalidate) { counter.next(); hostsDown.remove(applicationInstanceReference); } } } private boolean deleteNode_ignoreNoNodeException(String path, String debugLogMessageIfNotExists) throws Exception { try { curator.framework().delete().forPath(path); return true; } catch (NoNodeException e) { log.log(LogLevel.DEBUG, debugLogMessageIfNotExists, e); return false; } } private boolean createNode_ignoreNodeExistsException(String path, String debugLogMessageIfExists) throws Exception { try { curator.framework().create() .creatingParentsIfNeeded() .forPath(path); return true; } catch (NodeExistsException e) { log.log(LogLevel.DEBUG, debugLogMessageIfExists, e); return false; } } @Override /** Holding an application's lock ensures the cache is up to date for that application. */ private Map<ApplicationInstanceReference, Set<HostName>> getValidCache() { long cacheGeneration = counter.get(); if (counter.get() != cacheRefreshedAt) { cacheRefreshedAt = cacheGeneration; hostsDown.clear(); } return hostsDown; } private Set<HostName> hostsDownFor(ApplicationInstanceReference application) { try { if (curator.framework().checkExists().forPath(hostsAllowedDownPath(application)) == null) return Collections.emptySet(); return curator.framework().getChildren().forPath(hostsAllowedDownPath(application)) .stream().map(HostName::new) .collect(Collectors.toUnmodifiableSet()); } catch (Exception e) { throw new RuntimeException(e); } } @Override public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationInstanceReference applicationInstanceReference) { try { Stat statOrNull = curator.framework().checkExists().forPath( applicationInstanceSuspendedPath(applicationInstanceReference)); return (statOrNull == null) ? 
ApplicationInstanceStatus.NO_REMARKS : ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } private static String applicationInstancePath(ApplicationInstanceReference applicationInstanceReference) { return HOST_STATUS_BASE_PATH + '/' + applicationInstanceReference.tenantId() + ":" + applicationInstanceReference.applicationInstanceId(); } private static String hostsAllowedDownPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/hosts-allowed-down"; } private static String applicationInstanceLock2Path(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock2"; } private String applicationInstanceSuspendedPath(ApplicationInstanceReference applicationInstanceReference) { return APPLICATION_STATUS_BASE_PATH + "/" + OrchestratorUtil.toRestApiFormat(applicationInstanceReference); } private static String hostAllowedDownPath(ApplicationInstanceReference applicationInstanceReference, HostName hostname) { return hostsAllowedDownPath(applicationInstanceReference) + '/' + hostname.s(); } private class ZkMutableStatusRegistry implements MutableStatusRegistry { private final Lock lock; private final ApplicationInstanceReference applicationInstanceReference; private final boolean probe; public ZkMutableStatusRegistry(Lock lock, ApplicationInstanceReference applicationInstanceReference, boolean probe) { this.lock = lock; this.applicationInstanceReference = applicationInstanceReference; this.probe = probe; } @Override public void setHostState(final HostName hostName, final HostStatus status) { if (probe) return; log.log(LogLevel.INFO, "Setting host " + hostName + " to status " + status); setHostStatus(applicationInstanceReference, hostName, status); } @Override public void setApplicationInstanceStatus(ApplicationInstanceStatus applicationInstanceStatus) { if (probe) return; 
log.log(LogLevel.INFO, "Setting app " + applicationInstanceReference.asString() + " to status " + applicationInstanceStatus); String path = applicationInstanceSuspendedPath(applicationInstanceReference); try { switch (applicationInstanceStatus) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path, "Instance is already in state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Instance is already in state ALLOWED_TO_BE_DOWN, path = " + path); break; } } catch (Exception e) { throw new RuntimeException(e); } } @Override public void close() { try { lock.close(); } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Failed to close application lock for " + ZookeeperStatusService.class.getSimpleName() + ", will ignore and continue", e); } } } }
class ZookeeperStatusService implements StatusService { private static final Logger log = Logger.getLogger(ZookeeperStatusService.class.getName()); final static String HOST_STATUS_BASE_PATH = "/vespa/host-status-service"; final static String APPLICATION_STATUS_BASE_PATH = "/vespa/application-status-service"; final static String HOST_STATUS_CACHE_COUNTER_PATH = "/vespa/host-status-service-cache-counter"; private final Curator curator; private final CuratorCounter counter; /** A cache of hosts allowed to be down. Access only through {@link private final Map<ApplicationInstanceReference, Set<HostName>> hostsDown; private volatile long cacheRefreshedAt; @Inject public ZookeeperStatusService(@Component Curator curator) { this.curator = curator; this.counter = new CuratorCounter(curator, HOST_STATUS_CACHE_COUNTER_PATH); this.cacheRefreshedAt = counter.get(); this.hostsDown = new ConcurrentHashMap<>(); } @Override public Set<ApplicationInstanceReference> getAllSuspendedApplications() { try { Set<ApplicationInstanceReference> resultSet = new HashSet<>(); Stat stat = curator.framework().checkExists().forPath(APPLICATION_STATUS_BASE_PATH); if (stat == null) return resultSet; for (String appRefStr : curator.framework().getChildren().forPath(APPLICATION_STATUS_BASE_PATH)) { ApplicationInstanceReference appRef = OrchestratorUtil.parseAppInstanceReference(appRefStr); resultSet.add(appRef); } return resultSet; } catch (Exception e) { log.log(LogLevel.DEBUG, "Something went wrong while listing out applications in suspend.", e); throw new RuntimeException(e); } } /** * Cache is checked for freshness when this mapping is created, and may be invalidated again later * by other users of the cache. Since this function is backed by the cache, any such invalidations * will be reflected in the returned mapping; all users of the cache collaborate in repopulating it. 
*/ @Override public Function<ApplicationInstanceReference, Set<HostName>> getSuspendedHostsByApplication() { Map<ApplicationInstanceReference, Set<HostName>> suspendedHostsByApplication = getValidCache(); return application -> suspendedHostsByApplication.computeIfAbsent(application, this::hostsDownFor); } /** * 1) locks the status service for an application instance. * 2) fails all operations in this thread when the session is lost, * since session loss might cause the lock to be lost. * Since it only fails operations in this thread, * all operations depending on a lock, including the locking itself, must be done in this thread. * Note that since it is the thread that fails, all status operations in this thread will fail * even if they're not supposed to be guarded by this lock * (i.e. the request is for another applicationInstanceReference) */ @Override public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( OrchestratorContext context, ApplicationInstanceReference applicationInstanceReference) { Duration duration = context.getTimeLeft(); String lockPath = applicationInstanceLock2Path(applicationInstanceReference); Lock lock = new Lock(lockPath, curator); lock.acquire(duration); try { return new ZkMutableStatusRegistry(lock, applicationInstanceReference, context.isProbe()); } catch (Throwable t) { lock.close(); throw t; } } private void setHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName, HostStatus status) { String path = hostAllowedDownPath(applicationInstanceReference, hostName); boolean invalidate = false; try { switch (status) { case NO_REMARKS: invalidate = deleteNode_ignoreNoNodeException(path, "Host already has state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: invalidate = createNode_ignoreNodeExistsException(path, "Host already has state ALLOWED_TO_BE_DOWN, path = " + path); break; default: throw new IllegalArgumentException("Unexpected status '" + status + "'."); } } catch 
(Exception e) { invalidate = true; throw new RuntimeException(e); } finally { if (invalidate) { counter.next(); hostsDown.remove(applicationInstanceReference); } } } private boolean deleteNode_ignoreNoNodeException(String path, String debugLogMessageIfNotExists) throws Exception { try { curator.framework().delete().forPath(path); return true; } catch (NoNodeException e) { log.log(LogLevel.DEBUG, debugLogMessageIfNotExists, e); return false; } } private boolean createNode_ignoreNodeExistsException(String path, String debugLogMessageIfExists) throws Exception { try { curator.framework().create() .creatingParentsIfNeeded() .forPath(path); return true; } catch (NodeExistsException e) { log.log(LogLevel.DEBUG, debugLogMessageIfExists, e); return false; } } @Override /** Holding an application's lock ensures the cache is up to date for that application. */ private Map<ApplicationInstanceReference, Set<HostName>> getValidCache() { long cacheGeneration = counter.get(); if (counter.get() != cacheRefreshedAt) { cacheRefreshedAt = cacheGeneration; hostsDown.clear(); } return hostsDown; } private Set<HostName> hostsDownFor(ApplicationInstanceReference application) { try { if (curator.framework().checkExists().forPath(hostsAllowedDownPath(application)) == null) return Collections.emptySet(); return curator.framework().getChildren().forPath(hostsAllowedDownPath(application)) .stream().map(HostName::new) .collect(Collectors.toUnmodifiableSet()); } catch (Exception e) { throw new RuntimeException(e); } } @Override public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationInstanceReference applicationInstanceReference) { try { Stat statOrNull = curator.framework().checkExists().forPath( applicationInstanceSuspendedPath(applicationInstanceReference)); return (statOrNull == null) ? 
ApplicationInstanceStatus.NO_REMARKS : ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } private static String applicationInstancePath(ApplicationInstanceReference applicationInstanceReference) { return HOST_STATUS_BASE_PATH + '/' + applicationInstanceReference.tenantId() + ":" + applicationInstanceReference.applicationInstanceId(); } private static String hostsAllowedDownPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/hosts-allowed-down"; } private static String applicationInstanceLock2Path(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock2"; } private String applicationInstanceSuspendedPath(ApplicationInstanceReference applicationInstanceReference) { return APPLICATION_STATUS_BASE_PATH + "/" + OrchestratorUtil.toRestApiFormat(applicationInstanceReference); } private static String hostAllowedDownPath(ApplicationInstanceReference applicationInstanceReference, HostName hostname) { return hostsAllowedDownPath(applicationInstanceReference) + '/' + hostname.s(); } private class ZkMutableStatusRegistry implements MutableStatusRegistry { private final Lock lock; private final ApplicationInstanceReference applicationInstanceReference; private final boolean probe; public ZkMutableStatusRegistry(Lock lock, ApplicationInstanceReference applicationInstanceReference, boolean probe) { this.lock = lock; this.applicationInstanceReference = applicationInstanceReference; this.probe = probe; } @Override public ApplicationInstanceStatus getStatus() { return getApplicationInstanceStatus(applicationInstanceReference); } @Override public HostStatus getHostStatus(HostName hostName) { return ZookeeperStatusService.this.getHostStatus(applicationInstanceReference, hostName); } @Override public Set<HostName> getSuspendedHosts() { return 
getValidCache().computeIfAbsent(applicationInstanceReference, ZookeeperStatusService.this::hostsDownFor); } @Override public void setHostState(final HostName hostName, final HostStatus status) { if (probe) return; log.log(LogLevel.INFO, "Setting host " + hostName + " to status " + status); setHostStatus(applicationInstanceReference, hostName, status); } @Override public void setApplicationInstanceStatus(ApplicationInstanceStatus applicationInstanceStatus) { if (probe) return; log.log(LogLevel.INFO, "Setting app " + applicationInstanceReference.asString() + " to status " + applicationInstanceStatus); String path = applicationInstanceSuspendedPath(applicationInstanceReference); try { switch (applicationInstanceStatus) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path, "Instance is already in state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Instance is already in state ALLOWED_TO_BE_DOWN, path = " + path); break; } } catch (Exception e) { throw new RuntimeException(e); } } @Override public void close() { try { lock.close(); } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Failed to close application lock for " + ZookeeperStatusService.class.getSimpleName() + ", will ignore and continue", e); } } } }
s/Non/None
private static void restrictTlsProtocols(SSLEngine sslEngine) { String[] validProtocols = Arrays.stream(sslEngine.getSupportedProtocols()) .filter(ALLOWED_PROTOCOLS::contains) .toArray(String[]::new); if (validProtocols.length == 0) { throw new IllegalArgumentException( String.format("Non of the allowed protocols are supported (allowed-protocols=%s, supported-protocols=%s)", ALLOWED_PROTOCOLS, Arrays.toString(sslEngine.getSupportedProtocols()))); } log.log(Level.FINE, () -> String.format("Allowed protocols that are supported: %s", Arrays.toString(validProtocols))); sslEngine.setEnabledProtocols(validProtocols); }
String.format("Non of the allowed protocols are supported (allowed-protocols=%s, supported-protocols=%s)",
private static void restrictTlsProtocols(SSLEngine sslEngine) { String[] validProtocols = Arrays.stream(sslEngine.getSupportedProtocols()) .filter(ALLOWED_PROTOCOLS::contains) .toArray(String[]::new); if (validProtocols.length == 0) { throw new IllegalArgumentException( String.format("None of the allowed protocols are supported (allowed-protocols=%s, supported-protocols=%s)", ALLOWED_PROTOCOLS, Arrays.toString(sslEngine.getSupportedProtocols()))); } log.log(Level.FINE, () -> String.format("Allowed protocols that are supported: %s", Arrays.toString(validProtocols))); sslEngine.setEnabledProtocols(validProtocols); }
class DefaultTlsContext implements TlsContext { public static final List<String> ALLOWED_CIPHER_SUITES = Arrays.asList( "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256"); public static final List<String> ALLOWED_PROTOCOLS = List.of("TLSv1.2"); private static final Logger log = Logger.getLogger(DefaultTlsContext.class.getName()); private final SSLContext sslContext; private final List<String> acceptedCiphers; public DefaultTlsContext(List<X509Certificate> certificates, PrivateKey privateKey, List<X509Certificate> caCertificates, AuthorizedPeers authorizedPeers, AuthorizationMode mode, List<String> acceptedCiphers) { this.sslContext = createSslContext(certificates, privateKey, caCertificates, authorizedPeers, mode); this.acceptedCiphers = acceptedCiphers; } public DefaultTlsContext(Path tlsOptionsConfigFile, AuthorizationMode mode) { TransportSecurityOptions options = TransportSecurityOptions.fromJsonFile(tlsOptionsConfigFile); this.sslContext = createSslContext(options, mode); this.acceptedCiphers = options.getAcceptedCiphers(); } @Override public SSLEngine createSslEngine() { SSLEngine sslEngine = sslContext.createSSLEngine(); restrictSetOfEnabledCiphers(sslEngine, acceptedCiphers); restrictTlsProtocols(sslEngine); return sslEngine; } private static void restrictSetOfEnabledCiphers(SSLEngine sslEngine, List<String> acceptedCiphers) { String[] validCipherSuites = Arrays.stream(sslEngine.getSupportedCipherSuites()) .filter(suite -> ALLOWED_CIPHER_SUITES.contains(suite) && (acceptedCiphers.isEmpty() || acceptedCiphers.contains(suite))) .toArray(String[]::new); if (validCipherSuites.length == 0) { throw new IllegalStateException( String.format("None of the allowed cipher 
suites are supported " + "(allowed-cipher-suites=%s, supported-cipher-suites=%s, accepted-cipher-suites=%s)", ALLOWED_CIPHER_SUITES, List.of(sslEngine.getSupportedCipherSuites()), acceptedCiphers)); } log.log(Level.FINE, () -> String.format("Allowed cipher suites that are supported: %s", Arrays.toString(validCipherSuites))); sslEngine.setEnabledCipherSuites(validCipherSuites); } private static SSLContext createSslContext(List<X509Certificate> certificates, PrivateKey privateKey, List<X509Certificate> caCertificates, AuthorizedPeers authorizedPeers, AuthorizationMode mode) { SslContextBuilder builder = new SslContextBuilder(); if (!certificates.isEmpty()) { builder.withKeyStore(privateKey, certificates); } if (!caCertificates.isEmpty()) { builder.withTrustStore(caCertificates); } if (authorizedPeers != null) { builder.withTrustManagerFactory(new PeerAuthorizerTrustManagersFactory(authorizedPeers, mode)); } return builder.build(); } private static SSLContext createSslContext(TransportSecurityOptions options, AuthorizationMode mode) { SslContextBuilder builder = new SslContextBuilder(); options.getCertificatesFile() .ifPresent(certificates -> builder.withKeyStore(options.getPrivateKeyFile().get(), certificates)); options.getCaCertificatesFile().ifPresent(builder::withTrustStore); if (mode != AuthorizationMode.DISABLE) { options.getAuthorizedPeers().ifPresent( authorizedPeers -> builder.withTrustManagerFactory(new PeerAuthorizerTrustManagersFactory(authorizedPeers, mode))); } return builder.build(); } }
class DefaultTlsContext implements TlsContext { public static final List<String> ALLOWED_CIPHER_SUITES = Arrays.asList( "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256"); public static final List<String> ALLOWED_PROTOCOLS = List.of("TLSv1.2"); private static final Logger log = Logger.getLogger(DefaultTlsContext.class.getName()); private final SSLContext sslContext; private final List<String> acceptedCiphers; public DefaultTlsContext(List<X509Certificate> certificates, PrivateKey privateKey, List<X509Certificate> caCertificates, AuthorizedPeers authorizedPeers, AuthorizationMode mode, List<String> acceptedCiphers) { this.sslContext = createSslContext(certificates, privateKey, caCertificates, authorizedPeers, mode); this.acceptedCiphers = acceptedCiphers; } public DefaultTlsContext(Path tlsOptionsConfigFile, AuthorizationMode mode) { TransportSecurityOptions options = TransportSecurityOptions.fromJsonFile(tlsOptionsConfigFile); this.sslContext = createSslContext(options, mode); this.acceptedCiphers = options.getAcceptedCiphers(); } @Override public SSLEngine createSslEngine() { SSLEngine sslEngine = sslContext.createSSLEngine(); restrictSetOfEnabledCiphers(sslEngine, acceptedCiphers); restrictTlsProtocols(sslEngine); return sslEngine; } private static void restrictSetOfEnabledCiphers(SSLEngine sslEngine, List<String> acceptedCiphers) { String[] validCipherSuites = Arrays.stream(sslEngine.getSupportedCipherSuites()) .filter(suite -> ALLOWED_CIPHER_SUITES.contains(suite) && (acceptedCiphers.isEmpty() || acceptedCiphers.contains(suite))) .toArray(String[]::new); if (validCipherSuites.length == 0) { throw new IllegalStateException( String.format("None of the allowed cipher 
suites are supported " + "(allowed-cipher-suites=%s, supported-cipher-suites=%s, accepted-cipher-suites=%s)", ALLOWED_CIPHER_SUITES, List.of(sslEngine.getSupportedCipherSuites()), acceptedCiphers)); } log.log(Level.FINE, () -> String.format("Allowed cipher suites that are supported: %s", Arrays.toString(validCipherSuites))); sslEngine.setEnabledCipherSuites(validCipherSuites); } private static SSLContext createSslContext(List<X509Certificate> certificates, PrivateKey privateKey, List<X509Certificate> caCertificates, AuthorizedPeers authorizedPeers, AuthorizationMode mode) { SslContextBuilder builder = new SslContextBuilder(); if (!certificates.isEmpty()) { builder.withKeyStore(privateKey, certificates); } if (!caCertificates.isEmpty()) { builder.withTrustStore(caCertificates); } if (authorizedPeers != null) { builder.withTrustManagerFactory(new PeerAuthorizerTrustManagersFactory(authorizedPeers, mode)); } return builder.build(); } private static SSLContext createSslContext(TransportSecurityOptions options, AuthorizationMode mode) { SslContextBuilder builder = new SslContextBuilder(); options.getCertificatesFile() .ifPresent(certificates -> builder.withKeyStore(options.getPrivateKeyFile().get(), certificates)); options.getCaCertificatesFile().ifPresent(builder::withTrustStore); if (mode != AuthorizationMode.DISABLE) { options.getAuthorizedPeers().ifPresent( authorizedPeers -> builder.withTrustManagerFactory(new PeerAuthorizerTrustManagersFactory(authorizedPeers, mode))); } return builder.build(); } }
Indentation seems to be off.
public Optional<Node> parentOf(Node child) { return child.parentHostname() .flatMap(parentHostname -> nodes.stream() .filter(node -> node.hostname().equals(parentHostname)) .findFirst()); }
.flatMap(parentHostname -> nodes.stream()
public Optional<Node> parentOf(Node child) { return child.parentHostname() .flatMap(parentHostname -> nodes.stream() .filter(node -> node.hostname().equals(parentHostname)) .findFirst()); }
class NodeList implements Iterable<Node> { private final List<Node> nodes; public NodeList(List<Node> nodes) { this(nodes, true); } private NodeList(List<Node> nodes, boolean copy) { this.nodes = copy ? ImmutableList.copyOf(nodes) : Collections.unmodifiableList(nodes); } /** Returns the subset of nodes which are retired */ public NodeList retired() { return filter(node -> node.allocation().get().membership().retired()); } /** Returns the subset of nodes which are not retired */ public NodeList nonretired() { return filter(node -> ! node.allocation().get().membership().retired()); } /** Returns the subset of nodes of the given flavor */ public NodeList flavor(String flavor) { return filter(node -> node.flavor().name().equals(flavor)); } /** Returns the subset of nodes which does not have the given flavor */ public NodeList notFlavor(String flavor) { return filter(node -> ! node.flavor().name().equals(flavor)); } /** Returns the subset of nodes assigned to the given cluster type */ public NodeList type(ClusterSpec.Type type) { return filter(node -> node.allocation().get().membership().cluster().type().equals(type)); } /** Returns the subset of nodes owned by the given application */ public NodeList owner(ApplicationId application) { return filter(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false)); } /** Returns the subset of nodes matching the given node type */ public NodeList nodeType(NodeType nodeType) { return filter(node -> node.type() == nodeType); } /** Returns the subset of nodes that are parents */ public NodeList parents() { return filter(n -> n.parentHostname().isEmpty()); } /** Returns the child nodes of the given parent node */ public NodeList childrenOf(String hostname) { return filter(n -> n.parentHostname().map(hostname::equals).orElse(false)); } public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); } /** Returns the subset of nodes that are in a given state */ public NodeList state(Node.State 
state) { return filter(node -> node.state() == state); } /** Returns the parent nodes of the given child nodes */ public NodeList parentsOf(Collection<Node> children) { return children.stream() .map(this::parentOf) .filter(Optional::isPresent) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), NodeList::nonCopyNew)); } /** Returns the parent node of the given child node */ public int size() { return nodes.size(); } /** Returns the immutable list of nodes in this */ public List<Node> asList() { return nodes; } private NodeList filter(Predicate<Node> predicate) { return nodes.stream().filter(predicate).collect(collectingAndThen(Collectors.toList(), NodeList::nonCopyNew)); } @Override public Iterator<Node> iterator() { return nodes.iterator(); } private static NodeList nonCopyNew(List<Node> nodes) { return new NodeList(nodes, false); } }
class NodeList implements Iterable<Node> { private final List<Node> nodes; public NodeList(List<Node> nodes) { this(nodes, true); } private NodeList(List<Node> nodes, boolean copy) { this.nodes = copy ? ImmutableList.copyOf(nodes) : Collections.unmodifiableList(nodes); } /** Returns the subset of nodes which are retired */ public NodeList retired() { return filter(node -> node.allocation().get().membership().retired()); } /** Returns the subset of nodes which are not retired */ public NodeList nonretired() { return filter(node -> ! node.allocation().get().membership().retired()); } /** Returns the subset of nodes of the given flavor */ public NodeList flavor(String flavor) { return filter(node -> node.flavor().name().equals(flavor)); } /** Returns the subset of nodes which does not have the given flavor */ public NodeList notFlavor(String flavor) { return filter(node -> ! node.flavor().name().equals(flavor)); } /** Returns the subset of nodes assigned to the given cluster type */ public NodeList type(ClusterSpec.Type type) { return filter(node -> node.allocation().get().membership().cluster().type().equals(type)); } /** Returns the subset of nodes owned by the given application */ public NodeList owner(ApplicationId application) { return filter(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false)); } /** Returns the subset of nodes matching the given node type */ public NodeList nodeType(NodeType nodeType) { return filter(node -> node.type() == nodeType); } /** Returns the subset of nodes that are parents */ public NodeList parents() { return filter(n -> n.parentHostname().isEmpty()); } /** Returns the child nodes of the given parent node */ public NodeList childrenOf(String hostname) { return filter(n -> n.parentHostname().map(hostname::equals).orElse(false)); } public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); } /** Returns the subset of nodes that are in a given state */ public NodeList state(Node.State 
state) { return filter(node -> node.state() == state); } /** Returns the parent nodes of the given child nodes */ public NodeList parentsOf(Collection<Node> children) { return children.stream() .map(this::parentOf) .filter(Optional::isPresent) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), NodeList::wrap)); } /** Returns the parent node of the given child node */ public int size() { return nodes.size(); } /** Returns the immutable list of nodes in this */ public List<Node> asList() { return nodes; } private NodeList filter(Predicate<Node> predicate) { return nodes.stream().filter(predicate).collect(collectingAndThen(Collectors.toList(), NodeList::wrap)); } @Override public Iterator<Node> iterator() { return nodes.iterator(); } private static NodeList wrap(List<Node> nodes) { return new NodeList(nodes, false); } }
Tester should be initialised in a `@Before` method, then each test can use that instead of repeating the code.
public void relocate_nodes_from_spare_hosts() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(4, "host-small", NodeType.host, 32); deployZoneApp(tester); List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = makeApplicationId("t1", "a1"); ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1"); addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), clusterSpec1, flavor, 0, tester); addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), clusterSpec1, flavor, 1, tester); ApplicationId application2 = makeApplicationId("t2", "a2"); ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2"); addAndAssignNode(application2, "2a", dockerHosts.get(2).hostname(), clusterSpec2, flavor, 0, tester); addAndAssignNode(application2, "2b", dockerHosts.get(3).hostname(), clusterSpec2, flavor, 1, tester); deployApp(application1, clusterSpec1, flavor, tester, 2); deployApp(application2, clusterSpec2, flavor, tester, 2); Set<String> hostsWithChildren = new HashSet<>(); for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) { if (!isInactiveOrRetired(node)) { hostsWithChildren.add(node.parentHostname().get()); } } assertEquals(4 - tester.provisioner().getSpareCapacityProd(), hostsWithChildren.size()); }
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
public void relocate_nodes_from_spare_hosts() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(4, "host-small", NodeType.host, 32); deployZoneApp(tester); List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = makeApplicationId("t1", "a1"); ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1"); addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), clusterSpec1, flavor, 0, tester); addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), clusterSpec1, flavor, 1, tester); ApplicationId application2 = makeApplicationId("t2", "a2"); ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2"); addAndAssignNode(application2, "2a", dockerHosts.get(2).hostname(), clusterSpec2, flavor, 0, tester); addAndAssignNode(application2, "2b", dockerHosts.get(3).hostname(), clusterSpec2, flavor, 1, tester); deployApp(application1, clusterSpec1, flavor, tester, 2); deployApp(application2, clusterSpec2, flavor, tester, 2); Set<String> hostsWithChildren = new HashSet<>(); for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) { if (!isInactiveOrRetired(node)) { hostsWithChildren.add(node.parentHostname().get()); } } assertEquals(4 - tester.provisioner().getSpareCapacityProd(), hostsWithChildren.size()); }
class DynamicDockerProvisioningTest { /** * Test relocation of nodes from spare hosts. * <p> * Setup 4 docker hosts and allocate one container on each (from two different applications) * getSpareCapacityProd() spares. * <p> * Check that it relocates containers away from the getSpareCapacityProd() spares * <p> * Initial allocation of app 1 and 2 --> final allocation (example using 2 spares): * <p> * | | | | | | | | | | * | | | | | --> | 2a | 2b | | | * | 1a | 1b | 2a | 2b | | 1a | 1b | | | */ @Test /** * Test an allocation workflow: * <p> * 5 Hosts of capacity 3 (2 spares) * - Allocate app with 3 nodes * - Allocate app with 2 nodes * - Fail host and check redistribution */ @Test public void relocate_failed_nodes() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(5, "host-small", NodeType.host, 32); deployZoneApp(tester); List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = makeApplicationId("t1", "a1"); ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1"); deployApp(application1, clusterSpec1, flavor, tester, 3); ApplicationId application2 = makeApplicationId("t2", "a2"); ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2"); deployApp(application2, clusterSpec2, flavor, tester, 2); ApplicationId application3 = makeApplicationId("t3", "a3"); ClusterSpec clusterSpec3 = clusterSpec("myContent.t3.a3"); deployApp(application3, clusterSpec3, flavor, tester, 2); String parent = tester.nodeRepository().getNodes(application2).stream().findAny().get().parentHostname().get(); tester.nodeRepository().failRecursively(parent, Agent.system, "Testing"); deployApp(application1, clusterSpec1, flavor, tester, 3); deployApp(application2, clusterSpec2, flavor, tester, 2); 
deployApp(application3, clusterSpec3, flavor, tester, 2); Map<Integer, Integer> numberOfChildrenStat = new HashMap<>(); for (Node node : dockerHosts) { int nofChildren = tester.nodeRepository().list().childrenOf(node).size(); if (!numberOfChildrenStat.containsKey(nofChildren)) { numberOfChildrenStat.put(nofChildren, 0); } numberOfChildrenStat.put(nofChildren, numberOfChildrenStat.get(nofChildren) + 1); } assertEquals(3, numberOfChildrenStat.get(3).intValue()); assertEquals(1, numberOfChildrenStat.get(0).intValue()); assertEquals(1, numberOfChildrenStat.get(1).intValue()); } /** * Test redeployment of nodes that violates spare headroom - but without alternatives * <p> * Setup 2 docker hosts and allocate one app with a container on each. 2 spares * <p> * Initial allocation of app 1 --> final allocation: * <p> * | | | | | | * | | | --> | | | * | 1a | 1b | | 1a | 1b | */ @Test public void do_not_relocate_nodes_from_spare_if_no_where_to_relocate_them() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(2, "host-small", NodeType.host, 32); deployZoneApp(tester); List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = makeApplicationId("t1", "a1"); ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1"); addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), clusterSpec1, flavor, 0, tester); addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), clusterSpec1, flavor, 1, tester); deployApp(application1, clusterSpec1, flavor, tester, 2); Set<String> hostsWithChildren = new HashSet<>(); for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) { if (!isInactiveOrRetired(node)) { hostsWithChildren.add(node.parentHostname().get()); } } 
assertEquals(2, hostsWithChildren.size()); } @Test(expected = OutOfCapacityException.class) public void multiple_groups_are_on_separate_parent_hosts() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(5, "host-small", NodeType.host, 32); deployZoneApp(tester); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = tester.makeApplicationId(); tester.prepare(application1, clusterSpec("myContent.t1.a1"), 6, 2, flavor.canonicalName()); fail("Two groups have been allocated to the same parent host"); } @Ignore @Test public void spare_capacity_used_only_when_replacement() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); ApplicationId application1 = tester.makeApplicationId(); tester.makeReadyNodes(5, "host-small", NodeType.host, 32); deployZoneApp(tester); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); ClusterSpec clusterSpec = clusterSpec("myContent.t1.a1"); List<HostSpec> hosts = tester.prepare(application1, clusterSpec, 3, 1, flavor.canonicalName()); tester.activate(application1, ImmutableSet.copyOf(hosts)); DockerHostCapacity capacity = new DockerHostCapacity(tester.nodeRepository().getNodes(Node.State.values())); assertThat(capacity.freeCapacityInFlavorEquivalence(flavor), greaterThan(0)); List<Node> initialSpareCapacity = findSpareCapacity(tester); assertThat(initialSpareCapacity.size(), is(2)); try { hosts = tester.prepare(application1, clusterSpec, 4, 1, flavor.canonicalName()); fail("Was able to deploy with 4 nodes, should not be able to use spare capacity"); } catch (OutOfCapacityException e) { } tester.fail(hosts.get(0)); hosts = tester.prepare(application1, clusterSpec, 3, 1, flavor.canonicalName()); 
tester.activate(application1, ImmutableSet.copyOf(hosts)); List<Node> finalSpareCapacity = findSpareCapacity(tester); assertThat(finalSpareCapacity.size(), is(1)); } @Test public void non_prod_zones_do_not_have_spares() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.perf, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(3, "host-small", NodeType.host, 32); deployZoneApp(tester); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); ApplicationId application1 = tester.makeApplicationId(); List<HostSpec> hosts = tester.prepare(application1, clusterSpec("myContent.t1.a1"), 3, 1, flavor.canonicalName()); tester.activate(application1, ImmutableSet.copyOf(hosts)); List<Node> initialSpareCapacity = findSpareCapacity(tester); assertThat(initialSpareCapacity.size(), is(0)); } @Test(expected = OutOfCapacityException.class) public void allocation_should_fail_when_host_is_not_active() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeProvisionedNodes(3, "host-small", NodeType.host, 32); deployZoneApp(tester); ApplicationId application = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); tester.prepare(application, clusterSpec("myContent.t2.a2"), 2, 1, flavor.canonicalName()); } @Test public void provision_dual_stack_containers() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(2, "host-large", NodeType.host, 10, true); deployZoneApp(tester); ApplicationId application = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); List<HostSpec> hosts = tester.prepare(application, 
clusterSpec("myContent.t1.a1"), 2, 1, flavor.canonicalName()); tester.activate(application, hosts); List<Node> activeNodes = tester.nodeRepository().getNodes(application); assertEquals(ImmutableSet.of("127.0.127.12", "::12"), activeNodes.get(0).ipAddresses()); assertEquals(ImmutableSet.of("127.0.127.2", "::2"), activeNodes.get(1).ipAddresses()); } private ApplicationId makeApplicationId(String tenant, String appName) { return ApplicationId.from(tenant, appName, "default"); } private void deployApp(ApplicationId id, ClusterSpec spec, Flavor flavor, ProvisioningTester tester, int nodeCount) { List<HostSpec> hostSpec = tester.prepare(id, spec, nodeCount, 1, flavor.canonicalName()); tester.activate(id, new HashSet<>(hostSpec)); } private void addAndAssignNode(ApplicationId id, String hostname, String parentHostname, ClusterSpec clusterSpec, Flavor flavor, int index, ProvisioningTester tester) { Node node1a = Node.create("open1", Collections.singleton("127.0.0.100"), new HashSet<>(), hostname, Optional.of(parentHostname), flavor, NodeType.tenant); ClusterMembership clusterMembership1 = ClusterMembership.from( clusterSpec.with(Optional.of(ClusterSpec.Group.from(0))), index); Node node1aAllocation = node1a.allocate(id, clusterMembership1, Instant.now()); tester.nodeRepository().addNodes(Collections.singletonList(node1aAllocation)); NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(tester.getCurator())); tester.nodeRepository().activate(Collections.singletonList(node1aAllocation), transaction); transaction.commit(); } private List<Node> findSpareCapacity(ProvisioningTester tester) { List<Node> nodes = tester.nodeRepository().getNodes(Node.State.values()); NodeList nl = new NodeList(nodes); return nodes.stream() .filter(n -> n.type() == NodeType.host) .filter(n -> nl.childrenOf(n).size() == 0) .collect(Collectors.toList()); } private FlavorsConfig flavorsConfig() { FlavorConfigBuilder b = new FlavorConfigBuilder(); b.addFlavor("host-large", 
6., 6., 6, Flavor.Type.BARE_METAL); b.addFlavor("host-small", 3., 3., 3, Flavor.Type.BARE_METAL); b.addFlavor("host-medium", 4., 4., 4, Flavor.Type.BARE_METAL); b.addFlavor("d-1", 1, 1., 1, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-2", 2, 2., 2, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3", 3, 3., 3, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3-disk", 3, 3., 5, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3-mem", 3, 5., 3, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3-cpu", 5, 3., 3, Flavor.Type.DOCKER_CONTAINER); return b.build(); } private void deployZoneApp(ProvisioningTester tester) { ApplicationId applicationId = tester.makeApplicationId(); List<HostSpec> list = tester.prepare(applicationId, ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"), false, Collections.emptySet()), Capacity.fromRequiredNodeType(NodeType.host), 1); tester.activate(applicationId, ImmutableSet.copyOf(list)); } private boolean isInactiveOrRetired(Node node) { boolean isInactive = node.state().equals(Node.State.inactive); boolean isRetired = false; if (node.allocation().isPresent()) { isRetired = node.allocation().get().membership().retired(); } return isInactive || isRetired; } private ClusterSpec clusterSpec(String clusterId) { return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId), Version.fromString("6.42"), false, Collections.emptySet()); } }
class DynamicDockerProvisioningTest { /** * Test relocation of nodes from spare hosts. * <p> * Setup 4 docker hosts and allocate one container on each (from two different applications) * getSpareCapacityProd() spares. * <p> * Check that it relocates containers away from the getSpareCapacityProd() spares * <p> * Initial allocation of app 1 and 2 --> final allocation (example using 2 spares): * <p> * | | | | | | | | | | * | | | | | --> | 2a | 2b | | | * | 1a | 1b | 2a | 2b | | 1a | 1b | | | */ @Test /** * Test an allocation workflow: * <p> * 5 Hosts of capacity 3 (2 spares) * - Allocate app with 3 nodes * - Allocate app with 2 nodes * - Fail host and check redistribution */ @Test public void relocate_failed_nodes() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(5, "host-small", NodeType.host, 32); deployZoneApp(tester); List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = makeApplicationId("t1", "a1"); ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1"); deployApp(application1, clusterSpec1, flavor, tester, 3); ApplicationId application2 = makeApplicationId("t2", "a2"); ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2"); deployApp(application2, clusterSpec2, flavor, tester, 2); ApplicationId application3 = makeApplicationId("t3", "a3"); ClusterSpec clusterSpec3 = clusterSpec("myContent.t3.a3"); deployApp(application3, clusterSpec3, flavor, tester, 2); String parent = tester.nodeRepository().getNodes(application2).stream().findAny().get().parentHostname().get(); tester.nodeRepository().failRecursively(parent, Agent.system, "Testing"); deployApp(application1, clusterSpec1, flavor, tester, 3); deployApp(application2, clusterSpec2, flavor, tester, 2); 
deployApp(application3, clusterSpec3, flavor, tester, 2); Map<Integer, Integer> numberOfChildrenStat = new HashMap<>(); for (Node node : dockerHosts) { int nofChildren = tester.nodeRepository().list().childrenOf(node).size(); if (!numberOfChildrenStat.containsKey(nofChildren)) { numberOfChildrenStat.put(nofChildren, 0); } numberOfChildrenStat.put(nofChildren, numberOfChildrenStat.get(nofChildren) + 1); } assertEquals(3, numberOfChildrenStat.get(3).intValue()); assertEquals(1, numberOfChildrenStat.get(0).intValue()); assertEquals(1, numberOfChildrenStat.get(1).intValue()); } /** * Test redeployment of nodes that violates spare headroom - but without alternatives * <p> * Setup 2 docker hosts and allocate one app with a container on each. 2 spares * <p> * Initial allocation of app 1 --> final allocation: * <p> * | | | | | | * | | | --> | | | * | 1a | 1b | | 1a | 1b | */ @Test public void do_not_relocate_nodes_from_spare_if_no_where_to_relocate_them() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(2, "host-small", NodeType.host, 32); deployZoneApp(tester); List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = makeApplicationId("t1", "a1"); ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1"); addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), clusterSpec1, flavor, 0, tester); addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), clusterSpec1, flavor, 1, tester); deployApp(application1, clusterSpec1, flavor, tester, 2); Set<String> hostsWithChildren = new HashSet<>(); for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) { if (!isInactiveOrRetired(node)) { hostsWithChildren.add(node.parentHostname().get()); } } 
assertEquals(2, hostsWithChildren.size()); } @Test(expected = OutOfCapacityException.class) public void multiple_groups_are_on_separate_parent_hosts() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(5, "host-small", NodeType.host, 32); deployZoneApp(tester); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = tester.makeApplicationId(); tester.prepare(application1, clusterSpec("myContent.t1.a1"), 6, 2, flavor.canonicalName()); fail("Two groups have been allocated to the same parent host"); } @Ignore @Test public void spare_capacity_used_only_when_replacement() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); ApplicationId application1 = tester.makeApplicationId(); tester.makeReadyNodes(5, "host-small", NodeType.host, 32); deployZoneApp(tester); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); ClusterSpec clusterSpec = clusterSpec("myContent.t1.a1"); List<HostSpec> hosts = tester.prepare(application1, clusterSpec, 3, 1, flavor.canonicalName()); tester.activate(application1, ImmutableSet.copyOf(hosts)); DockerHostCapacity capacity = new DockerHostCapacity(tester.nodeRepository().getNodes(Node.State.values())); assertThat(capacity.freeCapacityInFlavorEquivalence(flavor), greaterThan(0)); List<Node> initialSpareCapacity = findSpareCapacity(tester); assertThat(initialSpareCapacity.size(), is(2)); try { hosts = tester.prepare(application1, clusterSpec, 4, 1, flavor.canonicalName()); fail("Was able to deploy with 4 nodes, should not be able to use spare capacity"); } catch (OutOfCapacityException e) { } tester.fail(hosts.get(0)); hosts = tester.prepare(application1, clusterSpec, 3, 1, flavor.canonicalName()); 
tester.activate(application1, ImmutableSet.copyOf(hosts)); List<Node> finalSpareCapacity = findSpareCapacity(tester); assertThat(finalSpareCapacity.size(), is(1)); } @Test public void non_prod_zones_do_not_have_spares() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.perf, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(3, "host-small", NodeType.host, 32); deployZoneApp(tester); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); ApplicationId application1 = tester.makeApplicationId(); List<HostSpec> hosts = tester.prepare(application1, clusterSpec("myContent.t1.a1"), 3, 1, flavor.canonicalName()); tester.activate(application1, ImmutableSet.copyOf(hosts)); List<Node> initialSpareCapacity = findSpareCapacity(tester); assertThat(initialSpareCapacity.size(), is(0)); } @Test(expected = OutOfCapacityException.class) public void allocation_should_fail_when_host_is_not_active() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeProvisionedNodes(3, "host-small", NodeType.host, 32); deployZoneApp(tester); ApplicationId application = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); tester.prepare(application, clusterSpec("myContent.t2.a2"), 2, 1, flavor.canonicalName()); } @Test public void provision_dual_stack_containers() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(2, "host-large", NodeType.host, 10, true); deployZoneApp(tester); ApplicationId application = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); List<HostSpec> hosts = tester.prepare(application, 
clusterSpec("myContent.t1.a1"), 2, 1, flavor.canonicalName()); tester.activate(application, hosts); List<Node> activeNodes = tester.nodeRepository().getNodes(application); assertEquals(ImmutableSet.of("127.0.127.12", "::12"), activeNodes.get(0).ipAddresses()); assertEquals(ImmutableSet.of("127.0.127.2", "::2"), activeNodes.get(1).ipAddresses()); } private ApplicationId makeApplicationId(String tenant, String appName) { return ApplicationId.from(tenant, appName, "default"); } private void deployApp(ApplicationId id, ClusterSpec spec, Flavor flavor, ProvisioningTester tester, int nodeCount) { List<HostSpec> hostSpec = tester.prepare(id, spec, nodeCount, 1, flavor.canonicalName()); tester.activate(id, new HashSet<>(hostSpec)); } private void addAndAssignNode(ApplicationId id, String hostname, String parentHostname, ClusterSpec clusterSpec, Flavor flavor, int index, ProvisioningTester tester) { Node node1a = Node.create("open1", Collections.singleton("127.0.0.100"), new HashSet<>(), hostname, Optional.of(parentHostname), flavor, NodeType.tenant); ClusterMembership clusterMembership1 = ClusterMembership.from( clusterSpec.with(Optional.of(ClusterSpec.Group.from(0))), index); Node node1aAllocation = node1a.allocate(id, clusterMembership1, Instant.now()); tester.nodeRepository().addNodes(Collections.singletonList(node1aAllocation)); NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(tester.getCurator())); tester.nodeRepository().activate(Collections.singletonList(node1aAllocation), transaction); transaction.commit(); } private List<Node> findSpareCapacity(ProvisioningTester tester) { List<Node> nodes = tester.nodeRepository().getNodes(Node.State.values()); NodeList nl = new NodeList(nodes); return nodes.stream() .filter(n -> n.type() == NodeType.host) .filter(n -> nl.childrenOf(n).size() == 0) .collect(Collectors.toList()); } private FlavorsConfig flavorsConfig() { FlavorConfigBuilder b = new FlavorConfigBuilder(); b.addFlavor("host-large", 
6., 6., 6, Flavor.Type.BARE_METAL); b.addFlavor("host-small", 3., 3., 3, Flavor.Type.BARE_METAL); b.addFlavor("host-medium", 4., 4., 4, Flavor.Type.BARE_METAL); b.addFlavor("d-1", 1, 1., 1, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-2", 2, 2., 2, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3", 3, 3., 3, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3-disk", 3, 3., 5, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3-mem", 3, 5., 3, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3-cpu", 5, 3., 3, Flavor.Type.DOCKER_CONTAINER); return b.build(); } private void deployZoneApp(ProvisioningTester tester) { ApplicationId applicationId = tester.makeApplicationId(); List<HostSpec> list = tester.prepare(applicationId, ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"), false, Collections.emptySet()), Capacity.fromRequiredNodeType(NodeType.host), 1); tester.activate(applicationId, ImmutableSet.copyOf(list)); } private boolean isInactiveOrRetired(Node node) { boolean isInactive = node.state().equals(Node.State.inactive); boolean isRetired = false; if (node.allocation().isPresent()) { isRetired = node.allocation().get().membership().retired(); } return isInactive || isRetired; } private ClusterSpec clusterSpec(String clusterId) { return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId), Version.fromString("6.42"), false, Collections.emptySet()); } }
Ahh, how do you get it to align with the dots? This is my default IntelliJ indentation which is 8 spaces off statement.
public Optional<Node> parentOf(Node child) { return child.parentHostname() .flatMap(parentHostname -> nodes.stream() .filter(node -> node.hostname().equals(parentHostname)) .findFirst()); }
.flatMap(parentHostname -> nodes.stream()
public Optional<Node> parentOf(Node child) { return child.parentHostname() .flatMap(parentHostname -> nodes.stream() .filter(node -> node.hostname().equals(parentHostname)) .findFirst()); }
class NodeList implements Iterable<Node> { private final List<Node> nodes; public NodeList(List<Node> nodes) { this(nodes, true); } private NodeList(List<Node> nodes, boolean copy) { this.nodes = copy ? ImmutableList.copyOf(nodes) : Collections.unmodifiableList(nodes); } /** Returns the subset of nodes which are retired */ public NodeList retired() { return filter(node -> node.allocation().get().membership().retired()); } /** Returns the subset of nodes which are not retired */ public NodeList nonretired() { return filter(node -> ! node.allocation().get().membership().retired()); } /** Returns the subset of nodes of the given flavor */ public NodeList flavor(String flavor) { return filter(node -> node.flavor().name().equals(flavor)); } /** Returns the subset of nodes which does not have the given flavor */ public NodeList notFlavor(String flavor) { return filter(node -> ! node.flavor().name().equals(flavor)); } /** Returns the subset of nodes assigned to the given cluster type */ public NodeList type(ClusterSpec.Type type) { return filter(node -> node.allocation().get().membership().cluster().type().equals(type)); } /** Returns the subset of nodes owned by the given application */ public NodeList owner(ApplicationId application) { return filter(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false)); } /** Returns the subset of nodes matching the given node type */ public NodeList nodeType(NodeType nodeType) { return filter(node -> node.type() == nodeType); } /** Returns the subset of nodes that are parents */ public NodeList parents() { return filter(n -> n.parentHostname().isEmpty()); } /** Returns the child nodes of the given parent node */ public NodeList childrenOf(String hostname) { return filter(n -> n.parentHostname().map(hostname::equals).orElse(false)); } public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); } /** Returns the subset of nodes that are in a given state */ public NodeList state(Node.State 
state) { return filter(node -> node.state() == state); } /** Returns the parent nodes of the given child nodes */ public NodeList parentsOf(Collection<Node> children) { return children.stream() .map(this::parentOf) .filter(Optional::isPresent) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), NodeList::nonCopyNew)); } /** Returns the parent node of the given child node */ public int size() { return nodes.size(); } /** Returns the immutable list of nodes in this */ public List<Node> asList() { return nodes; } private NodeList filter(Predicate<Node> predicate) { return nodes.stream().filter(predicate).collect(collectingAndThen(Collectors.toList(), NodeList::nonCopyNew)); } @Override public Iterator<Node> iterator() { return nodes.iterator(); } private static NodeList nonCopyNew(List<Node> nodes) { return new NodeList(nodes, false); } }
class NodeList implements Iterable<Node> { private final List<Node> nodes; public NodeList(List<Node> nodes) { this(nodes, true); } private NodeList(List<Node> nodes, boolean copy) { this.nodes = copy ? ImmutableList.copyOf(nodes) : Collections.unmodifiableList(nodes); } /** Returns the subset of nodes which are retired */ public NodeList retired() { return filter(node -> node.allocation().get().membership().retired()); } /** Returns the subset of nodes which are not retired */ public NodeList nonretired() { return filter(node -> ! node.allocation().get().membership().retired()); } /** Returns the subset of nodes of the given flavor */ public NodeList flavor(String flavor) { return filter(node -> node.flavor().name().equals(flavor)); } /** Returns the subset of nodes which does not have the given flavor */ public NodeList notFlavor(String flavor) { return filter(node -> ! node.flavor().name().equals(flavor)); } /** Returns the subset of nodes assigned to the given cluster type */ public NodeList type(ClusterSpec.Type type) { return filter(node -> node.allocation().get().membership().cluster().type().equals(type)); } /** Returns the subset of nodes owned by the given application */ public NodeList owner(ApplicationId application) { return filter(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false)); } /** Returns the subset of nodes matching the given node type */ public NodeList nodeType(NodeType nodeType) { return filter(node -> node.type() == nodeType); } /** Returns the subset of nodes that are parents */ public NodeList parents() { return filter(n -> n.parentHostname().isEmpty()); } /** Returns the child nodes of the given parent node */ public NodeList childrenOf(String hostname) { return filter(n -> n.parentHostname().map(hostname::equals).orElse(false)); } public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); } /** Returns the subset of nodes that are in a given state */ public NodeList state(Node.State 
state) { return filter(node -> node.state() == state); } /** Returns the parent nodes of the given child nodes */ public NodeList parentsOf(Collection<Node> children) { return children.stream() .map(this::parentOf) .filter(Optional::isPresent) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), NodeList::wrap)); } /** Returns the parent node of the given child node */ public int size() { return nodes.size(); } /** Returns the immutable list of nodes in this */ public List<Node> asList() { return nodes; } private NodeList filter(Predicate<Node> predicate) { return nodes.stream().filter(predicate).collect(collectingAndThen(Collectors.toList(), NodeList::wrap)); } @Override public Iterator<Node> iterator() { return nodes.iterator(); } private static NodeList wrap(List<Node> nodes) { return new NodeList(nodes, false); } }
I think it's Editor -> Code Style -> Java -> Wrapping and Braces -> Chained Method Calls -> Align when multiline.
/** Returns the parent node of the given child node, if the parent is in this list. */
public Optional<Node> parentOf(Node child) {
    Optional<String> parentHostname = child.parentHostname();
    if (parentHostname.isEmpty()) return Optional.empty();
    for (Node candidate : nodes) {
        if (candidate.hostname().equals(parentHostname.get()))
            return Optional.of(candidate);
    }
    return Optional.empty();
}
.flatMap(parentHostname -> nodes.stream()
/** Returns the parent node of the given child node, if the parent is in this list. */
public Optional<Node> parentOf(Node child) {
    if (child.parentHostname().isEmpty()) return Optional.empty();
    String wantedHostname = child.parentHostname().get();
    return nodes.stream()
                .filter(node -> node.hostname().equals(wantedHostname))
                .findFirst();
}
class NodeList implements Iterable<Node> { private final List<Node> nodes; public NodeList(List<Node> nodes) { this(nodes, true); } private NodeList(List<Node> nodes, boolean copy) { this.nodes = copy ? ImmutableList.copyOf(nodes) : Collections.unmodifiableList(nodes); } /** Returns the subset of nodes which are retired */ public NodeList retired() { return filter(node -> node.allocation().get().membership().retired()); } /** Returns the subset of nodes which are not retired */ public NodeList nonretired() { return filter(node -> ! node.allocation().get().membership().retired()); } /** Returns the subset of nodes of the given flavor */ public NodeList flavor(String flavor) { return filter(node -> node.flavor().name().equals(flavor)); } /** Returns the subset of nodes which does not have the given flavor */ public NodeList notFlavor(String flavor) { return filter(node -> ! node.flavor().name().equals(flavor)); } /** Returns the subset of nodes assigned to the given cluster type */ public NodeList type(ClusterSpec.Type type) { return filter(node -> node.allocation().get().membership().cluster().type().equals(type)); } /** Returns the subset of nodes owned by the given application */ public NodeList owner(ApplicationId application) { return filter(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false)); } /** Returns the subset of nodes matching the given node type */ public NodeList nodeType(NodeType nodeType) { return filter(node -> node.type() == nodeType); } /** Returns the subset of nodes that are parents */ public NodeList parents() { return filter(n -> n.parentHostname().isEmpty()); } /** Returns the child nodes of the given parent node */ public NodeList childrenOf(String hostname) { return filter(n -> n.parentHostname().map(hostname::equals).orElse(false)); } public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); } /** Returns the subset of nodes that are in a given state */ public NodeList state(Node.State 
state) { return filter(node -> node.state() == state); } /** Returns the parent nodes of the given child nodes */ public NodeList parentsOf(Collection<Node> children) { return children.stream() .map(this::parentOf) .filter(Optional::isPresent) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), NodeList::nonCopyNew)); } /** Returns the parent node of the given child node */ public int size() { return nodes.size(); } /** Returns the immutable list of nodes in this */ public List<Node> asList() { return nodes; } private NodeList filter(Predicate<Node> predicate) { return nodes.stream().filter(predicate).collect(collectingAndThen(Collectors.toList(), NodeList::nonCopyNew)); } @Override public Iterator<Node> iterator() { return nodes.iterator(); } private static NodeList nonCopyNew(List<Node> nodes) { return new NodeList(nodes, false); } }
class NodeList implements Iterable<Node> { private final List<Node> nodes; public NodeList(List<Node> nodes) { this(nodes, true); } private NodeList(List<Node> nodes, boolean copy) { this.nodes = copy ? ImmutableList.copyOf(nodes) : Collections.unmodifiableList(nodes); } /** Returns the subset of nodes which are retired */ public NodeList retired() { return filter(node -> node.allocation().get().membership().retired()); } /** Returns the subset of nodes which are not retired */ public NodeList nonretired() { return filter(node -> ! node.allocation().get().membership().retired()); } /** Returns the subset of nodes of the given flavor */ public NodeList flavor(String flavor) { return filter(node -> node.flavor().name().equals(flavor)); } /** Returns the subset of nodes which does not have the given flavor */ public NodeList notFlavor(String flavor) { return filter(node -> ! node.flavor().name().equals(flavor)); } /** Returns the subset of nodes assigned to the given cluster type */ public NodeList type(ClusterSpec.Type type) { return filter(node -> node.allocation().get().membership().cluster().type().equals(type)); } /** Returns the subset of nodes owned by the given application */ public NodeList owner(ApplicationId application) { return filter(node -> node.allocation().map(a -> a.owner().equals(application)).orElse(false)); } /** Returns the subset of nodes matching the given node type */ public NodeList nodeType(NodeType nodeType) { return filter(node -> node.type() == nodeType); } /** Returns the subset of nodes that are parents */ public NodeList parents() { return filter(n -> n.parentHostname().isEmpty()); } /** Returns the child nodes of the given parent node */ public NodeList childrenOf(String hostname) { return filter(n -> n.parentHostname().map(hostname::equals).orElse(false)); } public NodeList childrenOf(Node parent) { return childrenOf(parent.hostname()); } /** Returns the subset of nodes that are in a given state */ public NodeList state(Node.State 
state) { return filter(node -> node.state() == state); } /** Returns the parent nodes of the given child nodes */ public NodeList parentsOf(Collection<Node> children) { return children.stream() .map(this::parentOf) .filter(Optional::isPresent) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), NodeList::wrap)); } /** Returns the parent node of the given child node */ public int size() { return nodes.size(); } /** Returns the immutable list of nodes in this */ public List<Node> asList() { return nodes; } private NodeList filter(Predicate<Node> predicate) { return nodes.stream().filter(predicate).collect(collectingAndThen(Collectors.toList(), NodeList::wrap)); } @Override public Iterator<Node> iterator() { return nodes.iterator(); } private static NodeList wrap(List<Node> nodes) { return new NodeList(nodes, false); } }
Unfortunately, some of the tests in this class use different zones (prod vs. perf environment). I could initialize the prod tester up front, since it's unused in most of the tests, but I think that makes things less clear — and then some helper methods would start referring to the instance variable rather than being forced to take it in as an argument...
public void relocate_nodes_from_spare_hosts() {
    ProvisioningTester tester = new ProvisioningTester.Builder()
            .zone(new Zone(Environment.prod, RegionName.from("us-east")))
            .flavorsConfig(flavorsConfig())
            .build();
    tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
    deployZoneApp(tester);
    List<Node> hosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
    Flavor nodeFlavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");

    // Two applications, two containers each, one container per host
    ApplicationId app1 = makeApplicationId("t1", "a1");
    ClusterSpec cluster1 = clusterSpec("myContent.t1.a1");
    addAndAssignNode(app1, "1a", hosts.get(0).hostname(), cluster1, nodeFlavor, 0, tester);
    addAndAssignNode(app1, "1b", hosts.get(1).hostname(), cluster1, nodeFlavor, 1, tester);

    ApplicationId app2 = makeApplicationId("t2", "a2");
    ClusterSpec cluster2 = clusterSpec("myContent.t2.a2");
    addAndAssignNode(app2, "2a", hosts.get(2).hostname(), cluster2, nodeFlavor, 0, tester);
    addAndAssignNode(app2, "2b", hosts.get(3).hostname(), cluster2, nodeFlavor, 1, tester);

    deployApp(app1, cluster1, nodeFlavor, tester, 2);
    deployApp(app2, cluster2, nodeFlavor, tester, 2);

    // Collect the hosts which still carry active, non-retired children
    Set<String> occupiedHosts = new HashSet<>();
    for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) {
        if (isInactiveOrRetired(node)) continue;
        occupiedHosts.add(node.parentHostname().get());
    }
    assertEquals(4 - tester.provisioner().getSpareCapacityProd(), occupiedHosts.size());
}
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
public void relocate_nodes_from_spare_hosts() {
    ProvisioningTester tester = new ProvisioningTester.Builder()
            .zone(new Zone(Environment.prod, RegionName.from("us-east")))
            .flavorsConfig(flavorsConfig())
            .build();
    tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
    deployZoneApp(tester);
    List<Node> activeHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
    Flavor containerFlavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");

    // Place one container of each of two applications on each of the four hosts
    ApplicationId firstApp = makeApplicationId("t1", "a1");
    ClusterSpec firstCluster = clusterSpec("myContent.t1.a1");
    addAndAssignNode(firstApp, "1a", activeHosts.get(0).hostname(), firstCluster, containerFlavor, 0, tester);
    addAndAssignNode(firstApp, "1b", activeHosts.get(1).hostname(), firstCluster, containerFlavor, 1, tester);

    ApplicationId secondApp = makeApplicationId("t2", "a2");
    ClusterSpec secondCluster = clusterSpec("myContent.t2.a2");
    addAndAssignNode(secondApp, "2a", activeHosts.get(2).hostname(), secondCluster, containerFlavor, 0, tester);
    addAndAssignNode(secondApp, "2b", activeHosts.get(3).hostname(), secondCluster, containerFlavor, 1, tester);

    deployApp(firstApp, firstCluster, containerFlavor, tester, 2);
    deployApp(secondApp, secondCluster, containerFlavor, tester, 2);

    // Hosts still carrying active, non-retired children after redeployment
    Set<String> parentsInUse = new HashSet<>();
    for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) {
        if (isInactiveOrRetired(node)) continue;
        parentsInUse.add(node.parentHostname().get());
    }
    assertEquals(4 - tester.provisioner().getSpareCapacityProd(), parentsInUse.size());
}
class DynamicDockerProvisioningTest { /** * Test relocation of nodes from spare hosts. * <p> * Setup 4 docker hosts and allocate one container on each (from two different applications) * getSpareCapacityProd() spares. * <p> * Check that it relocates containers away from the getSpareCapacityProd() spares * <p> * Initial allocation of app 1 and 2 --> final allocation (example using 2 spares): * <p> * | | | | | | | | | | * | | | | | --> | 2a | 2b | | | * | 1a | 1b | 2a | 2b | | 1a | 1b | | | */ @Test /** * Test an allocation workflow: * <p> * 5 Hosts of capacity 3 (2 spares) * - Allocate app with 3 nodes * - Allocate app with 2 nodes * - Fail host and check redistribution */ @Test public void relocate_failed_nodes() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(5, "host-small", NodeType.host, 32); deployZoneApp(tester); List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = makeApplicationId("t1", "a1"); ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1"); deployApp(application1, clusterSpec1, flavor, tester, 3); ApplicationId application2 = makeApplicationId("t2", "a2"); ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2"); deployApp(application2, clusterSpec2, flavor, tester, 2); ApplicationId application3 = makeApplicationId("t3", "a3"); ClusterSpec clusterSpec3 = clusterSpec("myContent.t3.a3"); deployApp(application3, clusterSpec3, flavor, tester, 2); String parent = tester.nodeRepository().getNodes(application2).stream().findAny().get().parentHostname().get(); tester.nodeRepository().failRecursively(parent, Agent.system, "Testing"); deployApp(application1, clusterSpec1, flavor, tester, 3); deployApp(application2, clusterSpec2, flavor, tester, 2); 
deployApp(application3, clusterSpec3, flavor, tester, 2); Map<Integer, Integer> numberOfChildrenStat = new HashMap<>(); for (Node node : dockerHosts) { int nofChildren = tester.nodeRepository().list().childrenOf(node).size(); if (!numberOfChildrenStat.containsKey(nofChildren)) { numberOfChildrenStat.put(nofChildren, 0); } numberOfChildrenStat.put(nofChildren, numberOfChildrenStat.get(nofChildren) + 1); } assertEquals(3, numberOfChildrenStat.get(3).intValue()); assertEquals(1, numberOfChildrenStat.get(0).intValue()); assertEquals(1, numberOfChildrenStat.get(1).intValue()); } /** * Test redeployment of nodes that violates spare headroom - but without alternatives * <p> * Setup 2 docker hosts and allocate one app with a container on each. 2 spares * <p> * Initial allocation of app 1 --> final allocation: * <p> * | | | | | | * | | | --> | | | * | 1a | 1b | | 1a | 1b | */ @Test public void do_not_relocate_nodes_from_spare_if_no_where_to_relocate_them() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(2, "host-small", NodeType.host, 32); deployZoneApp(tester); List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = makeApplicationId("t1", "a1"); ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1"); addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), clusterSpec1, flavor, 0, tester); addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), clusterSpec1, flavor, 1, tester); deployApp(application1, clusterSpec1, flavor, tester, 2); Set<String> hostsWithChildren = new HashSet<>(); for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) { if (!isInactiveOrRetired(node)) { hostsWithChildren.add(node.parentHostname().get()); } } 
assertEquals(2, hostsWithChildren.size()); } @Test(expected = OutOfCapacityException.class) public void multiple_groups_are_on_separate_parent_hosts() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(5, "host-small", NodeType.host, 32); deployZoneApp(tester); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = tester.makeApplicationId(); tester.prepare(application1, clusterSpec("myContent.t1.a1"), 6, 2, flavor.canonicalName()); fail("Two groups have been allocated to the same parent host"); } @Ignore @Test public void spare_capacity_used_only_when_replacement() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); ApplicationId application1 = tester.makeApplicationId(); tester.makeReadyNodes(5, "host-small", NodeType.host, 32); deployZoneApp(tester); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); ClusterSpec clusterSpec = clusterSpec("myContent.t1.a1"); List<HostSpec> hosts = tester.prepare(application1, clusterSpec, 3, 1, flavor.canonicalName()); tester.activate(application1, ImmutableSet.copyOf(hosts)); DockerHostCapacity capacity = new DockerHostCapacity(tester.nodeRepository().getNodes(Node.State.values())); assertThat(capacity.freeCapacityInFlavorEquivalence(flavor), greaterThan(0)); List<Node> initialSpareCapacity = findSpareCapacity(tester); assertThat(initialSpareCapacity.size(), is(2)); try { hosts = tester.prepare(application1, clusterSpec, 4, 1, flavor.canonicalName()); fail("Was able to deploy with 4 nodes, should not be able to use spare capacity"); } catch (OutOfCapacityException e) { } tester.fail(hosts.get(0)); hosts = tester.prepare(application1, clusterSpec, 3, 1, flavor.canonicalName()); 
tester.activate(application1, ImmutableSet.copyOf(hosts)); List<Node> finalSpareCapacity = findSpareCapacity(tester); assertThat(finalSpareCapacity.size(), is(1)); } @Test public void non_prod_zones_do_not_have_spares() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.perf, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(3, "host-small", NodeType.host, 32); deployZoneApp(tester); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); ApplicationId application1 = tester.makeApplicationId(); List<HostSpec> hosts = tester.prepare(application1, clusterSpec("myContent.t1.a1"), 3, 1, flavor.canonicalName()); tester.activate(application1, ImmutableSet.copyOf(hosts)); List<Node> initialSpareCapacity = findSpareCapacity(tester); assertThat(initialSpareCapacity.size(), is(0)); } @Test(expected = OutOfCapacityException.class) public void allocation_should_fail_when_host_is_not_active() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeProvisionedNodes(3, "host-small", NodeType.host, 32); deployZoneApp(tester); ApplicationId application = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); tester.prepare(application, clusterSpec("myContent.t2.a2"), 2, 1, flavor.canonicalName()); } @Test public void provision_dual_stack_containers() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(2, "host-large", NodeType.host, 10, true); deployZoneApp(tester); ApplicationId application = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); List<HostSpec> hosts = tester.prepare(application, 
clusterSpec("myContent.t1.a1"), 2, 1, flavor.canonicalName()); tester.activate(application, hosts); List<Node> activeNodes = tester.nodeRepository().getNodes(application); assertEquals(ImmutableSet.of("127.0.127.12", "::12"), activeNodes.get(0).ipAddresses()); assertEquals(ImmutableSet.of("127.0.127.2", "::2"), activeNodes.get(1).ipAddresses()); } private ApplicationId makeApplicationId(String tenant, String appName) { return ApplicationId.from(tenant, appName, "default"); } private void deployApp(ApplicationId id, ClusterSpec spec, Flavor flavor, ProvisioningTester tester, int nodeCount) { List<HostSpec> hostSpec = tester.prepare(id, spec, nodeCount, 1, flavor.canonicalName()); tester.activate(id, new HashSet<>(hostSpec)); } private void addAndAssignNode(ApplicationId id, String hostname, String parentHostname, ClusterSpec clusterSpec, Flavor flavor, int index, ProvisioningTester tester) { Node node1a = Node.create("open1", Collections.singleton("127.0.0.100"), new HashSet<>(), hostname, Optional.of(parentHostname), flavor, NodeType.tenant); ClusterMembership clusterMembership1 = ClusterMembership.from( clusterSpec.with(Optional.of(ClusterSpec.Group.from(0))), index); Node node1aAllocation = node1a.allocate(id, clusterMembership1, Instant.now()); tester.nodeRepository().addNodes(Collections.singletonList(node1aAllocation)); NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(tester.getCurator())); tester.nodeRepository().activate(Collections.singletonList(node1aAllocation), transaction); transaction.commit(); } private List<Node> findSpareCapacity(ProvisioningTester tester) { List<Node> nodes = tester.nodeRepository().getNodes(Node.State.values()); NodeList nl = new NodeList(nodes); return nodes.stream() .filter(n -> n.type() == NodeType.host) .filter(n -> nl.childrenOf(n).size() == 0) .collect(Collectors.toList()); } private FlavorsConfig flavorsConfig() { FlavorConfigBuilder b = new FlavorConfigBuilder(); b.addFlavor("host-large", 
6., 6., 6, Flavor.Type.BARE_METAL); b.addFlavor("host-small", 3., 3., 3, Flavor.Type.BARE_METAL); b.addFlavor("host-medium", 4., 4., 4, Flavor.Type.BARE_METAL); b.addFlavor("d-1", 1, 1., 1, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-2", 2, 2., 2, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3", 3, 3., 3, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3-disk", 3, 3., 5, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3-mem", 3, 5., 3, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3-cpu", 5, 3., 3, Flavor.Type.DOCKER_CONTAINER); return b.build(); } private void deployZoneApp(ProvisioningTester tester) { ApplicationId applicationId = tester.makeApplicationId(); List<HostSpec> list = tester.prepare(applicationId, ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"), false, Collections.emptySet()), Capacity.fromRequiredNodeType(NodeType.host), 1); tester.activate(applicationId, ImmutableSet.copyOf(list)); } private boolean isInactiveOrRetired(Node node) { boolean isInactive = node.state().equals(Node.State.inactive); boolean isRetired = false; if (node.allocation().isPresent()) { isRetired = node.allocation().get().membership().retired(); } return isInactive || isRetired; } private ClusterSpec clusterSpec(String clusterId) { return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId), Version.fromString("6.42"), false, Collections.emptySet()); } }
class DynamicDockerProvisioningTest { /** * Test relocation of nodes from spare hosts. * <p> * Setup 4 docker hosts and allocate one container on each (from two different applications) * getSpareCapacityProd() spares. * <p> * Check that it relocates containers away from the getSpareCapacityProd() spares * <p> * Initial allocation of app 1 and 2 --> final allocation (example using 2 spares): * <p> * | | | | | | | | | | * | | | | | --> | 2a | 2b | | | * | 1a | 1b | 2a | 2b | | 1a | 1b | | | */ @Test /** * Test an allocation workflow: * <p> * 5 Hosts of capacity 3 (2 spares) * - Allocate app with 3 nodes * - Allocate app with 2 nodes * - Fail host and check redistribution */ @Test public void relocate_failed_nodes() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(5, "host-small", NodeType.host, 32); deployZoneApp(tester); List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = makeApplicationId("t1", "a1"); ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1"); deployApp(application1, clusterSpec1, flavor, tester, 3); ApplicationId application2 = makeApplicationId("t2", "a2"); ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2"); deployApp(application2, clusterSpec2, flavor, tester, 2); ApplicationId application3 = makeApplicationId("t3", "a3"); ClusterSpec clusterSpec3 = clusterSpec("myContent.t3.a3"); deployApp(application3, clusterSpec3, flavor, tester, 2); String parent = tester.nodeRepository().getNodes(application2).stream().findAny().get().parentHostname().get(); tester.nodeRepository().failRecursively(parent, Agent.system, "Testing"); deployApp(application1, clusterSpec1, flavor, tester, 3); deployApp(application2, clusterSpec2, flavor, tester, 2); 
deployApp(application3, clusterSpec3, flavor, tester, 2); Map<Integer, Integer> numberOfChildrenStat = new HashMap<>(); for (Node node : dockerHosts) { int nofChildren = tester.nodeRepository().list().childrenOf(node).size(); if (!numberOfChildrenStat.containsKey(nofChildren)) { numberOfChildrenStat.put(nofChildren, 0); } numberOfChildrenStat.put(nofChildren, numberOfChildrenStat.get(nofChildren) + 1); } assertEquals(3, numberOfChildrenStat.get(3).intValue()); assertEquals(1, numberOfChildrenStat.get(0).intValue()); assertEquals(1, numberOfChildrenStat.get(1).intValue()); } /** * Test redeployment of nodes that violates spare headroom - but without alternatives * <p> * Setup 2 docker hosts and allocate one app with a container on each. 2 spares * <p> * Initial allocation of app 1 --> final allocation: * <p> * | | | | | | * | | | --> | | | * | 1a | 1b | | 1a | 1b | */ @Test public void do_not_relocate_nodes_from_spare_if_no_where_to_relocate_them() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(2, "host-small", NodeType.host, 32); deployZoneApp(tester); List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = makeApplicationId("t1", "a1"); ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1"); addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), clusterSpec1, flavor, 0, tester); addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), clusterSpec1, flavor, 1, tester); deployApp(application1, clusterSpec1, flavor, tester, 2); Set<String> hostsWithChildren = new HashSet<>(); for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) { if (!isInactiveOrRetired(node)) { hostsWithChildren.add(node.parentHostname().get()); } } 
assertEquals(2, hostsWithChildren.size()); } @Test(expected = OutOfCapacityException.class) public void multiple_groups_are_on_separate_parent_hosts() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(5, "host-small", NodeType.host, 32); deployZoneApp(tester); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1"); ApplicationId application1 = tester.makeApplicationId(); tester.prepare(application1, clusterSpec("myContent.t1.a1"), 6, 2, flavor.canonicalName()); fail("Two groups have been allocated to the same parent host"); } @Ignore @Test public void spare_capacity_used_only_when_replacement() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); ApplicationId application1 = tester.makeApplicationId(); tester.makeReadyNodes(5, "host-small", NodeType.host, 32); deployZoneApp(tester); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); ClusterSpec clusterSpec = clusterSpec("myContent.t1.a1"); List<HostSpec> hosts = tester.prepare(application1, clusterSpec, 3, 1, flavor.canonicalName()); tester.activate(application1, ImmutableSet.copyOf(hosts)); DockerHostCapacity capacity = new DockerHostCapacity(tester.nodeRepository().getNodes(Node.State.values())); assertThat(capacity.freeCapacityInFlavorEquivalence(flavor), greaterThan(0)); List<Node> initialSpareCapacity = findSpareCapacity(tester); assertThat(initialSpareCapacity.size(), is(2)); try { hosts = tester.prepare(application1, clusterSpec, 4, 1, flavor.canonicalName()); fail("Was able to deploy with 4 nodes, should not be able to use spare capacity"); } catch (OutOfCapacityException e) { } tester.fail(hosts.get(0)); hosts = tester.prepare(application1, clusterSpec, 3, 1, flavor.canonicalName()); 
tester.activate(application1, ImmutableSet.copyOf(hosts)); List<Node> finalSpareCapacity = findSpareCapacity(tester); assertThat(finalSpareCapacity.size(), is(1)); } @Test public void non_prod_zones_do_not_have_spares() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.perf, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(3, "host-small", NodeType.host, 32); deployZoneApp(tester); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); ApplicationId application1 = tester.makeApplicationId(); List<HostSpec> hosts = tester.prepare(application1, clusterSpec("myContent.t1.a1"), 3, 1, flavor.canonicalName()); tester.activate(application1, ImmutableSet.copyOf(hosts)); List<Node> initialSpareCapacity = findSpareCapacity(tester); assertThat(initialSpareCapacity.size(), is(0)); } @Test(expected = OutOfCapacityException.class) public void allocation_should_fail_when_host_is_not_active() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeProvisionedNodes(3, "host-small", NodeType.host, 32); deployZoneApp(tester); ApplicationId application = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); tester.prepare(application, clusterSpec("myContent.t2.a2"), 2, 1, flavor.canonicalName()); } @Test public void provision_dual_stack_containers() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); tester.makeReadyNodes(2, "host-large", NodeType.host, 10, true); deployZoneApp(tester); ApplicationId application = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3"); List<HostSpec> hosts = tester.prepare(application, 
clusterSpec("myContent.t1.a1"), 2, 1, flavor.canonicalName()); tester.activate(application, hosts); List<Node> activeNodes = tester.nodeRepository().getNodes(application); assertEquals(ImmutableSet.of("127.0.127.12", "::12"), activeNodes.get(0).ipAddresses()); assertEquals(ImmutableSet.of("127.0.127.2", "::2"), activeNodes.get(1).ipAddresses()); } private ApplicationId makeApplicationId(String tenant, String appName) { return ApplicationId.from(tenant, appName, "default"); } private void deployApp(ApplicationId id, ClusterSpec spec, Flavor flavor, ProvisioningTester tester, int nodeCount) { List<HostSpec> hostSpec = tester.prepare(id, spec, nodeCount, 1, flavor.canonicalName()); tester.activate(id, new HashSet<>(hostSpec)); } private void addAndAssignNode(ApplicationId id, String hostname, String parentHostname, ClusterSpec clusterSpec, Flavor flavor, int index, ProvisioningTester tester) { Node node1a = Node.create("open1", Collections.singleton("127.0.0.100"), new HashSet<>(), hostname, Optional.of(parentHostname), flavor, NodeType.tenant); ClusterMembership clusterMembership1 = ClusterMembership.from( clusterSpec.with(Optional.of(ClusterSpec.Group.from(0))), index); Node node1aAllocation = node1a.allocate(id, clusterMembership1, Instant.now()); tester.nodeRepository().addNodes(Collections.singletonList(node1aAllocation)); NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(tester.getCurator())); tester.nodeRepository().activate(Collections.singletonList(node1aAllocation), transaction); transaction.commit(); } private List<Node> findSpareCapacity(ProvisioningTester tester) { List<Node> nodes = tester.nodeRepository().getNodes(Node.State.values()); NodeList nl = new NodeList(nodes); return nodes.stream() .filter(n -> n.type() == NodeType.host) .filter(n -> nl.childrenOf(n).size() == 0) .collect(Collectors.toList()); } private FlavorsConfig flavorsConfig() { FlavorConfigBuilder b = new FlavorConfigBuilder(); b.addFlavor("host-large", 
6., 6., 6, Flavor.Type.BARE_METAL); b.addFlavor("host-small", 3., 3., 3, Flavor.Type.BARE_METAL); b.addFlavor("host-medium", 4., 4., 4, Flavor.Type.BARE_METAL); b.addFlavor("d-1", 1, 1., 1, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-2", 2, 2., 2, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3", 3, 3., 3, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3-disk", 3, 3., 5, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3-mem", 3, 5., 3, Flavor.Type.DOCKER_CONTAINER); b.addFlavor("d-3-cpu", 5, 3., 3, Flavor.Type.DOCKER_CONTAINER); return b.build(); } private void deployZoneApp(ProvisioningTester tester) { ApplicationId applicationId = tester.makeApplicationId(); List<HostSpec> list = tester.prepare(applicationId, ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"), false, Collections.emptySet()), Capacity.fromRequiredNodeType(NodeType.host), 1); tester.activate(applicationId, ImmutableSet.copyOf(list)); } private boolean isInactiveOrRetired(Node node) { boolean isInactive = node.state().equals(Node.State.inactive); boolean isRetired = false; if (node.allocation().isPresent()) { isRetired = node.allocation().get().membership().retired(); } return isInactive || isRetired; } private ClusterSpec clusterSpec(String clusterId) { return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId), Version.fromString("6.42"), false, Collections.emptySet()); } }
Or just `hostProvisioner` 🙂
public ProvisioningTester build() { Orchestrator orchestrator = Optional.ofNullable(this.orchestrator) .orElseGet(() -> { Orchestrator orch = mock(Orchestrator.class); try { doThrow(new RuntimeException()).when(orch).acquirePermissionToRemove(any()); } catch (OrchestrationException e) { throw new RuntimeException(e); } return orch; }); return new ProvisioningTester( Optional.ofNullable(curator).orElseGet(MockCurator::new), new NodeFlavors(Optional.ofNullable(flavorsConfig).orElseGet(ProvisioningTester::createConfig)), Optional.ofNullable(zone).orElseGet(Zone::defaultZone), Optional.ofNullable(nameResolver).orElseGet(() -> new MockNameResolver().mockAnyLookup()), orchestrator, Optional.ofNullable(hostProvisioner).orElse(null), Optional.ofNullable(loadBalancerService).orElseGet(LoadBalancerServiceMock::new), Optional.ofNullable(flagSource).orElseGet(InMemoryFlagSource::new)); }
Optional.ofNullable(hostProvisioner).orElse(null),
public ProvisioningTester build() { Orchestrator orchestrator = Optional.ofNullable(this.orchestrator) .orElseGet(() -> { Orchestrator orch = mock(Orchestrator.class); try { doThrow(new RuntimeException()).when(orch).acquirePermissionToRemove(any()); } catch (OrchestrationException e) { throw new RuntimeException(e); } return orch; }); return new ProvisioningTester( Optional.ofNullable(curator).orElseGet(MockCurator::new), new NodeFlavors(Optional.ofNullable(flavorsConfig).orElseGet(ProvisioningTester::createConfig)), Optional.ofNullable(zone).orElseGet(Zone::defaultZone), Optional.ofNullable(nameResolver).orElseGet(() -> new MockNameResolver().mockAnyLookup()), orchestrator, hostProvisioner, Optional.ofNullable(loadBalancerService).orElseGet(LoadBalancerServiceMock::new), Optional.ofNullable(flagSource).orElseGet(InMemoryFlagSource::new)); }
class Builder { private Curator curator; private FlavorsConfig flavorsConfig; private Zone zone; private NameResolver nameResolver; private Orchestrator orchestrator; private HostProvisioner hostProvisioner; private LoadBalancerServiceMock loadBalancerService; private FlagSource flagSource; public Builder curator(Curator curator) { this.curator = curator; return this; } public Builder flavorsConfig(FlavorsConfig flavorsConfig) { this.flavorsConfig = flavorsConfig; return this; } public Builder zone(Zone zone) { this.zone = zone; return this; } public Builder nameResolver(NameResolver nameResolver) { this.nameResolver = nameResolver; return this; } public Builder orchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder hostProvisioner(HostProvisioner hostProvisioner) { this.hostProvisioner = hostProvisioner; return this; } public Builder loadBalancerService(LoadBalancerServiceMock loadBalancerService) { this.loadBalancerService = loadBalancerService; return this; } public Builder flagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } }
class Builder { private Curator curator; private FlavorsConfig flavorsConfig; private Zone zone; private NameResolver nameResolver; private Orchestrator orchestrator; private HostProvisioner hostProvisioner; private LoadBalancerServiceMock loadBalancerService; private FlagSource flagSource; public Builder curator(Curator curator) { this.curator = curator; return this; } public Builder flavorsConfig(FlavorsConfig flavorsConfig) { this.flavorsConfig = flavorsConfig; return this; } public Builder zone(Zone zone) { this.zone = zone; return this; } public Builder nameResolver(NameResolver nameResolver) { this.nameResolver = nameResolver; return this; } public Builder orchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder hostProvisioner(HostProvisioner hostProvisioner) { this.hostProvisioner = hostProvisioner; return this; } public Builder loadBalancerService(LoadBalancerServiceMock loadBalancerService) { this.loadBalancerService = loadBalancerService; return this; } public Builder flagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } }
Remove?
private static void mockHostProvisioner(HostProvisioner hostProvisioner, Flavor hostFlavor) { final int[] numProvisioned = { 0 }; doAnswer(invocation -> { int numHosts = (int) invocation.getArguments()[0]; Flavor nodeFlavor = (Flavor) invocation.getArguments()[1]; System.out.println(numHosts + " " + nodeFlavor); return IntStream.range(0, numHosts) .map(i -> ++numProvisioned[0]) .mapToObj(i -> new ProvisionedHost("id-" + i, "host-" + i, hostFlavor, "host-" + i + "-1", nodeFlavor)) .collect(Collectors.toList()); }).when(hostProvisioner).provisionHosts(anyInt(), any()); }
System.out.println(numHosts + " " + nodeFlavor);
private static void mockHostProvisioner(HostProvisioner hostProvisioner, Flavor hostFlavor) { final int[] numProvisioned = { 0 }; doAnswer(invocation -> { int numHosts = (int) invocation.getArguments()[0]; Flavor nodeFlavor = (Flavor) invocation.getArguments()[1]; return IntStream.range(0, numHosts) .map(i -> ++numProvisioned[0]) .mapToObj(i -> new ProvisionedHost("id-" + i, "host-" + i, hostFlavor, "host-" + i + "-1", nodeFlavor)) .collect(Collectors.toList()); }).when(hostProvisioner).provisionHosts(anyInt(), any()); }
class DynamicDockerProvisionTest { private final MockNameResolver nameResolver = new MockNameResolver().mockAnyLookup(); private final HostProvisioner hostProvisioner = mock(HostProvisioner.class); private final InMemoryFlagSource flagSource = new InMemoryFlagSource() .withBooleanFlag(Flags.ENABLE_DYNAMIC_PROVISIONING.id(), true); private final ProvisioningTester tester = new ProvisioningTester.Builder() .hostProvisioner(hostProvisioner).flagSource(flagSource).nameResolver(nameResolver).build(); @Test public void dynamically_provision_with_empty_node_repo() { assertEquals(0, tester.nodeRepository().list().size()); ApplicationId application1 = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("dockerSmall"); mockHostProvisioner(hostProvisioner, tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("small")); List<HostSpec> hostSpec = tester.prepare(application1, clusterSpec("myContent.t1.a1"), 4, 1, flavor.canonicalName()); verify(hostProvisioner).provisionHosts(4, flavor); assertEquals(8, tester.nodeRepository().list().size()); assertEquals(4, tester.nodeRepository().getNodes(NodeType.host, Node.State.provisioned).size()); assertEquals(4, tester.nodeRepository().getNodes(NodeType.tenant, Node.State.reserved).size()); assertEquals(List.of("host-1-1", "host-2-1", "host-3-1", "host-4-1"), hostSpec.stream().map(HostSpec::hostname).collect(Collectors.toList())); } @Test public void does_not_allocate_to_available_empty_hosts() { tester.makeReadyNodes(3, "small", NodeType.host, 10); deployZoneApp(tester); ApplicationId application = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("dockerSmall"); mockHostProvisioner(hostProvisioner, tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("small")); tester.prepare(application, clusterSpec("myContent.t2.a2"), 2, 1, flavor.canonicalName()); verify(hostProvisioner).provisionHosts(2, flavor); } 
@Test public void allocates_to_hosts_already_hosting_nodes_by_this_tenant() { ApplicationId application = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("dockerSmall"); mockHostProvisioner(hostProvisioner, tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("large")); tester.prepare(application, clusterSpec("myContent.t2.a2"), 2, 1, flavor.canonicalName()); verify(hostProvisioner).provisionHosts(2, flavor); for (int i = 1; i < 3; i++) { String hostname = "host-" + i; Node host = tester.nodeRepository().getNode(hostname).orElseThrow() .withIpAddressPool(Set.of("::" + i + ":2")).withIpAddresses(Set.of("::" + i + ":0")); tester.nodeRepository().setReady(List.of(host), Agent.system, getClass().getSimpleName()); nameResolver.addRecord(hostname + "-2", "::" + i + ":2"); } deployZoneApp(tester); mockHostProvisioner(hostProvisioner, tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("small")); tester.prepare(application, clusterSpec("another-id"), 2, 1, flavor.canonicalName()); verify(hostProvisioner).provisionHosts(anyInt(), any()); assertEquals(6, tester.nodeRepository().list().size()); assertEquals(2, tester.nodeRepository().getNodes(NodeType.host, Node.State.active).size()); assertEquals(4, tester.nodeRepository().getNodes(NodeType.tenant, Node.State.reserved).size()); } private static void deployZoneApp(ProvisioningTester tester) { ApplicationId applicationId = tester.makeApplicationId(); List<HostSpec> list = tester.prepare(applicationId, ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"), false, Collections.emptySet()), Capacity.fromRequiredNodeType(NodeType.host), 1); tester.activate(applicationId, ImmutableSet.copyOf(list)); } private static ClusterSpec clusterSpec(String clusterId) { return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId), Version.fromString("6.42"), false, 
Collections.emptySet()); } }
class DynamicDockerProvisionTest { private final MockNameResolver nameResolver = new MockNameResolver().mockAnyLookup(); private final HostProvisioner hostProvisioner = mock(HostProvisioner.class); private final InMemoryFlagSource flagSource = new InMemoryFlagSource() .withBooleanFlag(Flags.ENABLE_DYNAMIC_PROVISIONING.id(), true); private final ProvisioningTester tester = new ProvisioningTester.Builder() .hostProvisioner(hostProvisioner).flagSource(flagSource).nameResolver(nameResolver).build(); @Test public void dynamically_provision_with_empty_node_repo() { assertEquals(0, tester.nodeRepository().list().size()); ApplicationId application1 = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("dockerSmall"); mockHostProvisioner(hostProvisioner, tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("small")); List<HostSpec> hostSpec = tester.prepare(application1, clusterSpec("myContent.t1.a1"), 4, 1, flavor.canonicalName()); verify(hostProvisioner).provisionHosts(4, flavor); assertEquals(8, tester.nodeRepository().list().size()); assertEquals(4, tester.nodeRepository().getNodes(NodeType.host, Node.State.provisioned).size()); assertEquals(4, tester.nodeRepository().getNodes(NodeType.tenant, Node.State.reserved).size()); assertEquals(List.of("host-1-1", "host-2-1", "host-3-1", "host-4-1"), hostSpec.stream().map(HostSpec::hostname).collect(Collectors.toList())); } @Test public void does_not_allocate_to_available_empty_hosts() { tester.makeReadyNodes(3, "small", NodeType.host, 10); deployZoneApp(tester); ApplicationId application = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("dockerSmall"); mockHostProvisioner(hostProvisioner, tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("small")); tester.prepare(application, clusterSpec("myContent.t2.a2"), 2, 1, flavor.canonicalName()); verify(hostProvisioner).provisionHosts(2, flavor); } 
@Test public void allocates_to_hosts_already_hosting_nodes_by_this_tenant() { ApplicationId application = tester.makeApplicationId(); Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("dockerSmall"); mockHostProvisioner(hostProvisioner, tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("large")); tester.prepare(application, clusterSpec("myContent.t2.a2"), 2, 1, flavor.canonicalName()); verify(hostProvisioner).provisionHosts(2, flavor); for (int i = 1; i < 3; i++) { String hostname = "host-" + i; Node host = tester.nodeRepository().getNode(hostname).orElseThrow() .withIpAddressPool(Set.of("::" + i + ":2")).withIpAddresses(Set.of("::" + i + ":0")); tester.nodeRepository().setReady(List.of(host), Agent.system, getClass().getSimpleName()); nameResolver.addRecord(hostname + "-2", "::" + i + ":2"); } deployZoneApp(tester); mockHostProvisioner(hostProvisioner, tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("small")); tester.prepare(application, clusterSpec("another-id"), 2, 1, flavor.canonicalName()); verify(hostProvisioner).provisionHosts(anyInt(), any()); assertEquals(6, tester.nodeRepository().list().size()); assertEquals(2, tester.nodeRepository().getNodes(NodeType.host, Node.State.active).size()); assertEquals(4, tester.nodeRepository().getNodes(NodeType.tenant, Node.State.reserved).size()); } private static void deployZoneApp(ProvisioningTester tester) { ApplicationId applicationId = tester.makeApplicationId(); List<HostSpec> list = tester.prepare(applicationId, ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"), false, Collections.emptySet()), Capacity.fromRequiredNodeType(NodeType.host), 1); tester.activate(applicationId, ImmutableSet.copyOf(list)); } private static ClusterSpec clusterSpec(String clusterId) { return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId), Version.fromString("6.42"), false, 
Collections.emptySet()); } }
needs more space? ```suggestion ", modelId=" + modelId + ```
public String toString() { return "NodeRepositoryNode{" + "url='" + url + '\'' + ", id='" + id + '\'' + ", state=" + state + ", hostname='" + hostname + '\'' + ", ipAddresses=" + ipAddresses + ", additionalIpAddresses=" + additionalIpAddresses + ", openStackId='" + openStackId + '\'' + ", flavor='" + flavor + '\'' + ", canonicalFlavor='" + canonicalFlavor + '\'' + ", membership=" + membership + ", owner=" + owner + ", restartGeneration=" + restartGeneration + ", rebootGeneration=" + rebootGeneration + ", currentRestartGeneration=" + currentRestartGeneration + ", currentRebootGeneration=" + currentRebootGeneration + ", vespaVersion='" + vespaVersion + '\'' + ", wantedVespaVersion='" + wantedVespaVersion + '\'' + ", currentOsVersion='" + currentOsVersion + '\'' + ", wantedOsVersion='" + wantedOsVersion + '\'' + ", failCount=" + failCount + ", hardwareFailure=" + hardwareFailure + ", hardwareFailureDescription='" + hardwareFailureDescription + '\'' + ", hardwareDivergence='" + hardwareDivergence + '\'' + ", environment=" + environment + ", type=" + type + ", wantedDockerImage='" + wantedDockerImage + '\'' + ", currentDockerImage='" + currentDockerImage + '\'' + ", parentHostname='" + parentHostname + '\'' + ", wantToRetire=" + wantToRetire + ", wantToDeprovision=" + wantToDeprovision + ", minDiskAvailableGb=" + minDiskAvailableGb + ", minMainMemoryAvailableGb=" + minMainMemoryAvailableGb + ", cost=" + cost + ", minCpuCores=" + minCpuCores + ", description='" + description + '\'' + ", history=" + Arrays.toString(history) + ", allowedToBeDown=" + allowedToBeDown + ",modelId=" + modelId + '}'; }
",modelId=" + modelId +
public String toString() { return "NodeRepositoryNode{" + "url='" + url + '\'' + ", id='" + id + '\'' + ", state=" + state + ", hostname='" + hostname + '\'' + ", ipAddresses=" + ipAddresses + ", additionalIpAddresses=" + additionalIpAddresses + ", openStackId='" + openStackId + '\'' + ", flavor='" + flavor + '\'' + ", canonicalFlavor='" + canonicalFlavor + '\'' + ", membership=" + membership + ", owner=" + owner + ", restartGeneration=" + restartGeneration + ", rebootGeneration=" + rebootGeneration + ", currentRestartGeneration=" + currentRestartGeneration + ", currentRebootGeneration=" + currentRebootGeneration + ", vespaVersion='" + vespaVersion + '\'' + ", wantedVespaVersion='" + wantedVespaVersion + '\'' + ", currentOsVersion='" + currentOsVersion + '\'' + ", wantedOsVersion='" + wantedOsVersion + '\'' + ", failCount=" + failCount + ", hardwareFailure=" + hardwareFailure + ", hardwareFailureDescription='" + hardwareFailureDescription + '\'' + ", hardwareDivergence='" + hardwareDivergence + '\'' + ", environment=" + environment + ", type=" + type + ", wantedDockerImage='" + wantedDockerImage + '\'' + ", currentDockerImage='" + currentDockerImage + '\'' + ", parentHostname='" + parentHostname + '\'' + ", wantToRetire=" + wantToRetire + ", wantToDeprovision=" + wantToDeprovision + ", minDiskAvailableGb=" + minDiskAvailableGb + ", minMainMemoryAvailableGb=" + minMainMemoryAvailableGb + ", cost=" + cost + ", minCpuCores=" + minCpuCores + ", description='" + description + '\'' + ", history=" + Arrays.toString(history) + ", allowedToBeDown=" + allowedToBeDown + ", modelId=" + modelId + '}'; }
class NodeRepositoryNode { @JsonProperty("url") private String url; @JsonProperty("id") private String id; @JsonProperty("state") private NodeState state; @JsonProperty("hostname") private String hostname; @JsonProperty("ipAddresses") private Set<String> ipAddresses; @JsonProperty("additionalIpAddresses") private Set<String> additionalIpAddresses; @JsonProperty("openStackId") private String openStackId; @JsonProperty("flavor") private String flavor; @JsonProperty("canonicalFlavor") private String canonicalFlavor; @JsonProperty("membership") private NodeMembership membership; @JsonProperty("owner") private NodeOwner owner; @JsonProperty("restartGeneration") private Integer restartGeneration; @JsonProperty("rebootGeneration") private Integer rebootGeneration; @JsonProperty("currentRestartGeneration") private Integer currentRestartGeneration; @JsonProperty("currentRebootGeneration") private Integer currentRebootGeneration; @JsonProperty("vespaVersion") private String vespaVersion; @JsonProperty("wantedVespaVersion") private String wantedVespaVersion; @JsonProperty("currentOsVersion") private String currentOsVersion; @JsonProperty("wantedOsVersion") private String wantedOsVersion; @JsonProperty("failCount") private Integer failCount; @JsonProperty("hardwareFailure") private Boolean hardwareFailure; @JsonProperty("hardwareFailureDescription") private String hardwareFailureDescription; @JsonProperty("hardwareDivergence") private String hardwareDivergence; @JsonProperty("environment") private NodeEnvironment environment; @JsonProperty("type") private NodeType type; @JsonProperty("wantedDockerImage") private String wantedDockerImage; @JsonProperty("currentDockerImage") private String currentDockerImage; @JsonProperty("parentHostname") private String parentHostname; @JsonProperty("wantToRetire") private Boolean wantToRetire; @JsonProperty("wantToDeprovision") private Boolean wantToDeprovision; @JsonProperty("minDiskAvailableGb") private Double minDiskAvailableGb; 
@JsonProperty("minMainMemoryAvailableGb") private Double minMainMemoryAvailableGb; @JsonProperty("cost") private Integer cost; @JsonProperty("minCpuCores") private Double minCpuCores; @JsonProperty("description") private String description; @JsonProperty("history") private NodeHistory[] history; @JsonProperty("allowedToBeDown") private Boolean allowedToBeDown; @JsonProperty("modelId") private String modelId; public String getUrl() { return url; } public void setUrl(String url) { this.url = url; } public String getId() { return id; } public void setId(String id) { this.id = id; } public NodeState getState() { return state; } public void setState(NodeState state) { this.state = state; } public String getHostname() { return hostname; } public void setHostname(String hostname) { this.hostname = hostname; } public Set<String> getIpAddresses() { return ipAddresses; } public Set<String> getAdditionalIpAddresses() { return additionalIpAddresses; } public void setIpAddresses(Set<String> ipAddresses) { this.ipAddresses = ipAddresses; } public void setAdditionalIpAddresses(Set<String> additionalIpAddresses) { this.additionalIpAddresses = additionalIpAddresses; } public String getOpenStackId() { return openStackId; } public void setOpenStackId(String openStackId) { this.openStackId = openStackId; } public String getFlavor() { return flavor; } public void setFlavor(String flavor) { this.flavor = flavor; } public String getCanonicalFlavor() { return canonicalFlavor; } public void setCanonicalFlavor(String canonicalFlavor) { this.canonicalFlavor = canonicalFlavor; } public NodeMembership getMembership() { return membership; } public void setMembership(NodeMembership membership) { this.membership = membership; } public NodeOwner getOwner() { return owner; } public void setOwner(NodeOwner owner) { this.owner = owner; } public Integer getRestartGeneration() { return restartGeneration; } public void setRestartGeneration(Integer restartGeneration) { this.restartGeneration = 
restartGeneration; } public Integer getRebootGeneration() { return rebootGeneration; } public void setRebootGeneration(Integer rebootGeneration) { this.rebootGeneration = rebootGeneration; } public Integer getCurrentRestartGeneration() { return currentRestartGeneration; } public void setCurrentRestartGeneration(Integer currentRestartGeneration) { this.currentRestartGeneration = currentRestartGeneration; } public Integer getCurrentRebootGeneration() { return currentRebootGeneration; } public void setCurrentRebootGeneration(Integer currentRebootGeneration) { this.currentRebootGeneration = currentRebootGeneration; } public String getVespaVersion() { return vespaVersion; } public void setVespaVersion(String vespaVersion) { this.vespaVersion = vespaVersion; } public String getWantedVespaVersion() { return wantedVespaVersion; } public void setWantedVespaVersion(String wantedVespaVersion) { this.wantedVespaVersion = wantedVespaVersion; } public Integer getFailCount() { return failCount; } public void setFailCount(Integer failCount) { this.failCount = failCount; } public Boolean getHardwareFailure() { return hardwareFailure; } public void setHardwareFailure(Boolean hardwareFailure) { this.hardwareFailure = hardwareFailure; } public String getHardwareFailureDescription() { return hardwareFailureDescription; } public void setHardwareDivergence(String hardwareDivergence) { this.hardwareDivergence = hardwareDivergence; } public String getHardwareDivergence() { return hardwareDivergence; } public void setHardwareFailureDescription(String hardwareFailureDescription) { this.hardwareFailureDescription = hardwareFailureDescription; } public NodeEnvironment getEnvironment() { return environment; } public void setEnvironment(NodeEnvironment environment) { this.environment = environment; } public NodeType getType() { return type; } public void setType(NodeType type) { this.type = type; } public String getWantedDockerImage() { return wantedDockerImage; } public void 
setWantedDockerImage(String wantedDockerImage) { this.wantedDockerImage = wantedDockerImage; } public String getCurrentDockerImage() { return currentDockerImage; } public void setCurrentDockerImage(String currentDockerImage) { this.currentDockerImage = currentDockerImage; } public String getParentHostname() { return parentHostname; } public void setParentHostname(String parentHostname) { this.parentHostname = parentHostname; } public Boolean getWantToRetire() { return wantToRetire; } public Boolean getWantToDeprovision() { return wantToDeprovision; } public void setWantToRetire(Boolean wantToRetire) { this.wantToRetire = wantToRetire; } public void setWantToDeprovision(Boolean wantToDeprovision) { this.wantToDeprovision = wantToDeprovision; } public Double getMinDiskAvailableGb() { return minDiskAvailableGb; } public void setMinDiskAvailableGb(Double minDiskAvailableGb) { this.minDiskAvailableGb = minDiskAvailableGb; } public Double getMinMainMemoryAvailableGb() { return minMainMemoryAvailableGb; } public void setMinMainMemoryAvailableGb(Double minMainMemoryAvailableGb) { this.minMainMemoryAvailableGb = minMainMemoryAvailableGb; } public Integer getCost() { return cost; } public void setCost(Integer cost) { this.cost = cost; } public Double getMinCpuCores() { return minCpuCores; } public void setMinCpuCores(Double minCpuCores) { this.minCpuCores = minCpuCores; } public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } public NodeHistory[] getHistory() { return history; } public void setHistory(NodeHistory[] history) { this.history = history; } public Boolean getAllowedToBeDown() { return allowedToBeDown; } public String getCurrentOsVersion() { return currentOsVersion; } public void setCurrentOsVersion(String currentOsVersion) { this.currentOsVersion = currentOsVersion; } public String getWantedOsVersion() { return wantedOsVersion; } public void setWantedOsVersion(String 
wantedOsVersion) { this.wantedOsVersion = wantedOsVersion; } public String getModelId() { return modelId; } public void setModelId(String modelId) { this.modelId = modelId; } @Override }
class NodeRepositoryNode { @JsonProperty("url") private String url; @JsonProperty("id") private String id; @JsonProperty("state") private NodeState state; @JsonProperty("hostname") private String hostname; @JsonProperty("ipAddresses") private Set<String> ipAddresses; @JsonProperty("additionalIpAddresses") private Set<String> additionalIpAddresses; @JsonProperty("openStackId") private String openStackId; @JsonProperty("flavor") private String flavor; @JsonProperty("canonicalFlavor") private String canonicalFlavor; @JsonProperty("membership") private NodeMembership membership; @JsonProperty("owner") private NodeOwner owner; @JsonProperty("restartGeneration") private Integer restartGeneration; @JsonProperty("rebootGeneration") private Integer rebootGeneration; @JsonProperty("currentRestartGeneration") private Integer currentRestartGeneration; @JsonProperty("currentRebootGeneration") private Integer currentRebootGeneration; @JsonProperty("vespaVersion") private String vespaVersion; @JsonProperty("wantedVespaVersion") private String wantedVespaVersion; @JsonProperty("currentOsVersion") private String currentOsVersion; @JsonProperty("wantedOsVersion") private String wantedOsVersion; @JsonProperty("failCount") private Integer failCount; @JsonProperty("hardwareFailure") private Boolean hardwareFailure; @JsonProperty("hardwareFailureDescription") private String hardwareFailureDescription; @JsonProperty("hardwareDivergence") private String hardwareDivergence; @JsonProperty("environment") private NodeEnvironment environment; @JsonProperty("type") private NodeType type; @JsonProperty("wantedDockerImage") private String wantedDockerImage; @JsonProperty("currentDockerImage") private String currentDockerImage; @JsonProperty("parentHostname") private String parentHostname; @JsonProperty("wantToRetire") private Boolean wantToRetire; @JsonProperty("wantToDeprovision") private Boolean wantToDeprovision; @JsonProperty("minDiskAvailableGb") private Double minDiskAvailableGb; 
@JsonProperty("minMainMemoryAvailableGb") private Double minMainMemoryAvailableGb; @JsonProperty("cost") private Integer cost; @JsonProperty("minCpuCores") private Double minCpuCores; @JsonProperty("description") private String description; @JsonProperty("history") private NodeHistory[] history; @JsonProperty("allowedToBeDown") private Boolean allowedToBeDown; @JsonProperty("modelId") private String modelId; public String getUrl() { return url; } public void setUrl(String url) { this.url = url; } public String getId() { return id; } public void setId(String id) { this.id = id; } public NodeState getState() { return state; } public void setState(NodeState state) { this.state = state; } public String getHostname() { return hostname; } public void setHostname(String hostname) { this.hostname = hostname; } public Set<String> getIpAddresses() { return ipAddresses; } public Set<String> getAdditionalIpAddresses() { return additionalIpAddresses; } public void setIpAddresses(Set<String> ipAddresses) { this.ipAddresses = ipAddresses; } public void setAdditionalIpAddresses(Set<String> additionalIpAddresses) { this.additionalIpAddresses = additionalIpAddresses; } public String getOpenStackId() { return openStackId; } public void setOpenStackId(String openStackId) { this.openStackId = openStackId; } public String getFlavor() { return flavor; } public void setFlavor(String flavor) { this.flavor = flavor; } public String getCanonicalFlavor() { return canonicalFlavor; } public void setCanonicalFlavor(String canonicalFlavor) { this.canonicalFlavor = canonicalFlavor; } public NodeMembership getMembership() { return membership; } public void setMembership(NodeMembership membership) { this.membership = membership; } public NodeOwner getOwner() { return owner; } public void setOwner(NodeOwner owner) { this.owner = owner; } public Integer getRestartGeneration() { return restartGeneration; } public void setRestartGeneration(Integer restartGeneration) { this.restartGeneration = 
restartGeneration; } public Integer getRebootGeneration() { return rebootGeneration; } public void setRebootGeneration(Integer rebootGeneration) { this.rebootGeneration = rebootGeneration; } public Integer getCurrentRestartGeneration() { return currentRestartGeneration; } public void setCurrentRestartGeneration(Integer currentRestartGeneration) { this.currentRestartGeneration = currentRestartGeneration; } public Integer getCurrentRebootGeneration() { return currentRebootGeneration; } public void setCurrentRebootGeneration(Integer currentRebootGeneration) { this.currentRebootGeneration = currentRebootGeneration; } public String getVespaVersion() { return vespaVersion; } public void setVespaVersion(String vespaVersion) { this.vespaVersion = vespaVersion; } public String getWantedVespaVersion() { return wantedVespaVersion; } public void setWantedVespaVersion(String wantedVespaVersion) { this.wantedVespaVersion = wantedVespaVersion; } public Integer getFailCount() { return failCount; } public void setFailCount(Integer failCount) { this.failCount = failCount; } public Boolean getHardwareFailure() { return hardwareFailure; } public void setHardwareFailure(Boolean hardwareFailure) { this.hardwareFailure = hardwareFailure; } public String getHardwareFailureDescription() { return hardwareFailureDescription; } public void setHardwareDivergence(String hardwareDivergence) { this.hardwareDivergence = hardwareDivergence; } public String getHardwareDivergence() { return hardwareDivergence; } public void setHardwareFailureDescription(String hardwareFailureDescription) { this.hardwareFailureDescription = hardwareFailureDescription; } public NodeEnvironment getEnvironment() { return environment; } public void setEnvironment(NodeEnvironment environment) { this.environment = environment; } public NodeType getType() { return type; } public void setType(NodeType type) { this.type = type; } public String getWantedDockerImage() { return wantedDockerImage; } public void 
setWantedDockerImage(String wantedDockerImage) { this.wantedDockerImage = wantedDockerImage; } public String getCurrentDockerImage() { return currentDockerImage; } public void setCurrentDockerImage(String currentDockerImage) { this.currentDockerImage = currentDockerImage; } public String getParentHostname() { return parentHostname; } public void setParentHostname(String parentHostname) { this.parentHostname = parentHostname; } public Boolean getWantToRetire() { return wantToRetire; } public Boolean getWantToDeprovision() { return wantToDeprovision; } public void setWantToRetire(Boolean wantToRetire) { this.wantToRetire = wantToRetire; } public void setWantToDeprovision(Boolean wantToDeprovision) { this.wantToDeprovision = wantToDeprovision; } public Double getMinDiskAvailableGb() { return minDiskAvailableGb; } public void setMinDiskAvailableGb(Double minDiskAvailableGb) { this.minDiskAvailableGb = minDiskAvailableGb; } public Double getMinMainMemoryAvailableGb() { return minMainMemoryAvailableGb; } public void setMinMainMemoryAvailableGb(Double minMainMemoryAvailableGb) { this.minMainMemoryAvailableGb = minMainMemoryAvailableGb; } public Integer getCost() { return cost; } public void setCost(Integer cost) { this.cost = cost; } public Double getMinCpuCores() { return minCpuCores; } public void setMinCpuCores(Double minCpuCores) { this.minCpuCores = minCpuCores; } public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } public NodeHistory[] getHistory() { return history; } public void setHistory(NodeHistory[] history) { this.history = history; } public Boolean getAllowedToBeDown() { return allowedToBeDown; } public String getCurrentOsVersion() { return currentOsVersion; } public void setCurrentOsVersion(String currentOsVersion) { this.currentOsVersion = currentOsVersion; } public String getWantedOsVersion() { return wantedOsVersion; } public void setWantedOsVersion(String 
wantedOsVersion) { this.wantedOsVersion = wantedOsVersion; } public String getModelId() { return modelId; } public void setModelId(String modelId) { this.modelId = modelId; } @Override }
Please update `SerializationTest` to cover (de)serialization of this particular field.
/**
 * Writes every persisted field of the given node into the given slime object.
 * Optional fields are written only when present; the node's State is not written
 * by this method (fromJson receives it as an argument instead).
 */
private void toSlime(Node node, Cursor object) {
    object.setString(hostnameKey, node.hostname());
    toSlime(node.ipAddresses(), object.setArray(ipAddressesKey), IP::requireAddresses);
    toSlime(node.ipAddressPool().asSet(), object.setArray(ipAddressPoolKey), IP::requireAddressPool);
    object.setString(idKey, node.id());
    node.parentHostname().ifPresent(parent -> object.setString(parentHostnameKey, parent));
    object.setString(flavorKey, node.flavor().name());
    // Wanted and current reboot generations are stored side by side
    object.setLong(rebootGenerationKey, node.status().reboot().wanted());
    object.setLong(currentRebootGenerationKey, node.status().reboot().current());
    node.status().vespaVersion().ifPresent(v -> object.setString(vespaVersionKey, v.toString()));
    object.setLong(failCountKey, node.status().failCount());
    node.status().hardwareFailureDescription().ifPresent(d -> object.setString(hardwareFailureKey, d));
    object.setBool(wantToRetireKey, node.status().wantToRetire());
    object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision());
    node.allocation().ifPresent(alloc -> toSlime(alloc, object.setObject(instanceKey)));
    toSlime(node.history(), object.setArray(historyKey));
    object.setString(nodeTypeKey, toString(node.type()));
    node.status().hardwareDivergence().ifPresent(d -> object.setString(hardwareDivergenceKey, d));
    node.status().osVersion().ifPresent(v -> object.setString(osVersionKey, v.toString()));
    node.status().firmwareVerifiedAt().ifPresent(at -> object.setLong(firmwareCheckKey, at.toEpochMilli()));
    node.reports().toSlime(object, reportsKey);
    node.modelId().ifPresent(id -> object.setString(modelIdKey, id));
}
node.modelId().ifPresent(modelId -> object.setString(modelIdKey, modelId));
/**
 * Serializes all persisted fields of the given node into the given slime object.
 * Optional fields (parent hostname, vespa/OS version, hardware failure/divergence,
 * allocation, firmware check instant, model id) are written only when present.
 * The node's State is not serialized here; fromJson takes it as a separate argument.
 */
private void toSlime(Node node, Cursor object) { object.setString(hostnameKey, node.hostname()); toSlime(node.ipAddresses(), object.setArray(ipAddressesKey), IP::requireAddresses); toSlime(node.ipAddressPool().asSet(), object.setArray(ipAddressPoolKey), IP::requireAddressPool); object.setString(idKey, node.id()); node.parentHostname().ifPresent(hostname -> object.setString(parentHostnameKey, hostname)); object.setString(flavorKey, node.flavor().name()); object.setLong(rebootGenerationKey, node.status().reboot().wanted()); object.setLong(currentRebootGenerationKey, node.status().reboot().current()); node.status().vespaVersion().ifPresent(version -> object.setString(vespaVersionKey, version.toString())); object.setLong(failCountKey, node.status().failCount()); node.status().hardwareFailureDescription().ifPresent(failure -> object.setString(hardwareFailureKey, failure)); object.setBool(wantToRetireKey, node.status().wantToRetire()); object.setBool(wantToDeprovisionKey, node.status().wantToDeprovision()); node.allocation().ifPresent(allocation -> toSlime(allocation, object.setObject(instanceKey))); toSlime(node.history(), object.setArray(historyKey)); object.setString(nodeTypeKey, toString(node.type())); node.status().hardwareDivergence().ifPresent(hardwareDivergence -> object.setString(hardwareDivergenceKey, hardwareDivergence)); node.status().osVersion().ifPresent(version -> object.setString(osVersionKey, version.toString())); node.status().firmwareVerifiedAt().ifPresent(instant -> object.setLong(firmwareCheckKey, instant.toEpochMilli())); node.reports().toSlime(object, reportsKey); node.modelId().ifPresent(modelId -> object.setString(modelIdKey, modelId)); }
/**
 * Serializes a Node to and from JSON (Slime) for persistence. The node's State is
 * not part of the serialized form: fromJson takes it as an argument, so it must be
 * encoded externally (e.g. by storage location). Unknown event-type / agent /
 * node-type strings cause IllegalArgumentException on read.
 */
class NodeSerializer { /** The configured node flavors */ private final NodeFlavors flavors; private static final String hostnameKey = "hostname"; private static final String ipAddressesKey = "ipAddresses"; private static final String ipAddressPoolKey = "additionalIpAddresses"; private static final String idKey = "openStackId"; private static final String parentHostnameKey = "parentHostname"; private static final String historyKey = "history"; private static final String instanceKey = "instance"; private static final String rebootGenerationKey = "rebootGeneration"; private static final String currentRebootGenerationKey = "currentRebootGeneration"; private static final String vespaVersionKey = "vespaVersion"; private static final String failCountKey = "failCount"; private static final String hardwareFailureKey = "hardwareFailure"; private static final String nodeTypeKey = "type"; private static final String wantToRetireKey = "wantToRetire"; private static final String wantToDeprovisionKey = "wantToDeprovision"; private static final String hardwareDivergenceKey = "hardwareDivergence"; private static final String osVersionKey = "osVersion"; private static final String firmwareCheckKey = "firmwareCheck"; private static final String reportsKey = "reports"; private static final String modelIdKey = "modelId"; private static final String flavorKey = "flavor"; private static final String tenantIdKey = "tenantId"; private static final String applicationIdKey = "applicationId"; private static final String instanceIdKey = "instanceId"; private static final String serviceIdKey = "serviceId"; private static final String restartGenerationKey = "restartGeneration"; private static final String currentRestartGenerationKey = "currentRestartGeneration"; private static final String removableKey = "removable"; private static final String wantedVespaVersionKey = "wantedVespaVersion"; private static final String historyEventTypeKey = "type"; private static final String atKey = "at"; 
// Construction, the toJson entry point, and slime writers for Allocation, History events and IP addresses follow.
private static final String agentKey = "agent"; public NodeSerializer(NodeFlavors flavors) { this.flavors = flavors; } public byte[] toJson(Node node) { try { Slime slime = new Slime(); toSlime(node, slime.setObject()); return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new RuntimeException("Serialization of " + node + " to json failed", e); } } private void toSlime(Allocation allocation, Cursor object) { object.setString(tenantIdKey, allocation.owner().tenant().value()); object.setString(applicationIdKey, allocation.owner().application().value()); object.setString(instanceIdKey, allocation.owner().instance().value()); object.setString(serviceIdKey, allocation.membership().stringValue()); object.setLong(restartGenerationKey, allocation.restartGeneration().wanted()); object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current()); object.setBool(removableKey, allocation.isRemovable()); object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString()); } private void toSlime(History history, Cursor array) { for (History.Event event : history.events()) toSlime(event, array.addObject()); } private void toSlime(History.Event event, Cursor object) { object.setString(historyEventTypeKey, toString(event.type())); object.setLong(atKey, event.at().toEpochMilli()); object.setString(agentKey, toString(event.agent())); } private void toSlime(Set<String> ipAddresses, Cursor array, UnaryOperator<Set<String>> validator) { validator.apply(ipAddresses).stream().sorted(IP.naturalOrder).forEach(array::addString); } public Node fromJson(Node.State state, byte[] data) { return nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get()); } private Node nodeFromSlime(Node.State state, Inspector object) { return new Node(object.field(idKey).asString(), ipAddressesFromSlime(object, ipAddressesKey), ipAddressesFromSlime(object, ipAddressPoolKey), object.field(hostnameKey).asString(), parentHostnameFromSlime(object), 
// (continued) remaining Node constructor arguments, then the Status, Flavor and Allocation readers.
flavorFromSlime(object), statusFromSlime(object), state, allocationFromSlime(object.field(instanceKey)), historyFromSlime(object.field(historyKey)), nodeTypeFromString(object.field(nodeTypeKey).asString()), Reports.fromSlime(object.field(reportsKey)), modelIdFromSlime(object)); } private Status statusFromSlime(Inspector object) { return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey), versionFromSlime(object.field(vespaVersionKey)), (int)object.field(failCountKey).asLong(), hardwareFailureDescriptionFromSlime(object), object.field(wantToRetireKey).asBool(), object.field(wantToDeprovisionKey).asBool(), removeQuotedNulls(hardwareDivergenceFromSlime(object)), versionFromSlime(object.field(osVersionKey)), instantFromSlime(object.field(firmwareCheckKey))); } private Flavor flavorFromSlime(Inspector object) { return flavors.getFlavorOrThrow(object.field(flavorKey).asString()); } private Optional<Allocation> allocationFromSlime(Inspector object) { if ( !
// (continued) allocationFromSlime body, then application-id, history, generation and membership readers.
object.valid()) return Optional.empty(); return Optional.of(new Allocation(applicationIdFromSlime(object), clusterMembershipFromSlime(object), generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey), object.field(removableKey).asBool())); } private ApplicationId applicationIdFromSlime(Inspector object) { return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()), ApplicationName.from(object.field(applicationIdKey).asString()), InstanceName.from(object.field(instanceIdKey).asString())); } private History historyFromSlime(Inspector array) { List<History.Event> events = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> { History.Event event = eventFromSlime(item); if (event != null) events.add(event); }); return new History(events); } private History.Event eventFromSlime(Inspector object) { History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString()); if (type == null) return null; Instant at = Instant.ofEpochMilli(object.field(atKey).asLong()); Agent agent = eventAgentFromSlime(object.field(agentKey)); return new History.Event(type, agent, at); } private Generation generationFromSlime(Inspector object, String wantedField, String currentField) { Inspector current = object.field(currentField); return new Generation(object.field(wantedField).asLong(), current.asLong()); } private ClusterMembership clusterMembershipFromSlime(Inspector object) { return ClusterMembership.from(object.field(serviceIdKey).asString(), versionFromSlime(object.field(wantedVespaVersionKey)).get()); } private Optional<Version> versionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(Version.fromString(object.asString())); } private Optional<Instant> instantFromSlime(Inspector object) { if ( !
// (continued) Optional readers for instant/parent/divergence/failure/modelId; removeQuotedNulls drops a literal "null" string value.
object.valid()) return Optional.empty(); return Optional.of(Instant.ofEpochMilli(object.asLong())); } private Optional<String> parentHostnameFromSlime(Inspector object) { if (object.field(parentHostnameKey).valid()) return Optional.of(object.field(parentHostnameKey).asString()); else return Optional.empty(); } private Optional<String> hardwareDivergenceFromSlime(Inspector object) { if (object.field(hardwareDivergenceKey).valid()) { return Optional.of(object.field(hardwareDivergenceKey).asString()); } return Optional.empty(); } private Optional<String> removeQuotedNulls(Optional<String> value) { return value.filter(v -> !v.equals("null")); } private Set<String> ipAddressesFromSlime(Inspector object, String key) { ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder(); object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString())); return ipAddresses.build(); } private Optional<String> hardwareFailureDescriptionFromSlime(Inspector object) { if (object.field(hardwareFailureKey).valid()) { return Optional.of(object.field(hardwareFailureKey).asString()); } return Optional.empty(); } private Optional<String> modelIdFromSlime(Inspector object) { if (object.field(modelIdKey).valid()) { return Optional.of(object.field(modelIdKey).asString()); } return Optional.empty(); } /** Returns the event type, or null if this event type should be ignored */ private History.Event.Type eventTypeFromString(String eventTypeString) { switch (eventTypeString) { case "provisioned" : return History.Event.Type.provisioned; case "readied" : return History.Event.Type.readied; case "reserved" : return History.Event.Type.reserved; case "activated" : return History.Event.Type.activated; case "wantToRetire": return History.Event.Type.wantToRetire; case "retired" : return History.Event.Type.retired; case "deactivated" : return History.Event.Type.deactivated; case "parked" : return History.Event.Type.parked; case "failed" : return History.Event.Type.failed; case 
// NOTE(review): the Javadoc above says "or null if this event type should be ignored", but unknown strings throw IllegalArgumentException, so the null check in eventFromSlime looks dead — confirm intent.
"deallocated" : return History.Event.Type.deallocated; case "down" : return History.Event.Type.down; case "requested" : return History.Event.Type.requested; case "rebooted" : return History.Event.Type.rebooted; } throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'"); } private String toString(History.Event.Type nodeEventType) { switch (nodeEventType) { case provisioned : return "provisioned"; case readied : return "readied"; case reserved : return "reserved"; case activated : return "activated"; case wantToRetire: return "wantToRetire"; case retired : return "retired"; case deactivated : return "deactivated"; case parked : return "parked"; case failed : return "failed"; case deallocated : return "deallocated"; case down : return "down"; case requested: return "requested"; case rebooted: return "rebooted"; } throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined"); } private Agent eventAgentFromSlime(Inspector eventAgentField) { if ( !
// (continued) agent and node-type string mappings; a missing agent field defaults to Agent.system, unknown values throw.
eventAgentField.valid()) return Agent.system; switch (eventAgentField.asString()) { case "application" : return Agent.application; case "system" : return Agent.system; case "operator" : return Agent.operator; case "NodeRetirer" : return Agent.NodeRetirer; } throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'"); } private String toString(Agent agent) { switch (agent) { case application : return "application"; case system : return "system"; case operator : return "operator"; case NodeRetirer : return "NodeRetirer"; } throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined"); } static NodeType nodeTypeFromString(String typeString) { switch (typeString) { case "tenant": return NodeType.tenant; case "host": return NodeType.host; case "proxy": return NodeType.proxy; case "proxyhost": return NodeType.proxyhost; case "config": return NodeType.config; case "confighost": return NodeType.confighost; case "controller": return NodeType.controller; case "controllerhost": return NodeType.controllerhost; default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'"); } } static String toString(NodeType type) { switch (type) { case tenant: return "tenant"; case host: return "host"; case proxy: return "proxy"; case proxyhost: return "proxyhost"; case config: return "config"; case confighost: return "confighost"; case controller: return "controller"; case controllerhost: return "controllerhost"; } throw new IllegalArgumentException("Serialized form of '" + type + "' not defined"); } }
/**
 * Slime (JSON) serializer for Node: toJson/fromJson entry points plus per-part
 * readers and writers (status, allocation, history, IP addresses). Node.State is
 * supplied externally to fromJson rather than being stored in the JSON; unknown
 * enum-string values on read throw IllegalArgumentException.
 */
class NodeSerializer { /** The configured node flavors */ private final NodeFlavors flavors; private static final String hostnameKey = "hostname"; private static final String ipAddressesKey = "ipAddresses"; private static final String ipAddressPoolKey = "additionalIpAddresses"; private static final String idKey = "openStackId"; private static final String parentHostnameKey = "parentHostname"; private static final String historyKey = "history"; private static final String instanceKey = "instance"; private static final String rebootGenerationKey = "rebootGeneration"; private static final String currentRebootGenerationKey = "currentRebootGeneration"; private static final String vespaVersionKey = "vespaVersion"; private static final String failCountKey = "failCount"; private static final String hardwareFailureKey = "hardwareFailure"; private static final String nodeTypeKey = "type"; private static final String wantToRetireKey = "wantToRetire"; private static final String wantToDeprovisionKey = "wantToDeprovision"; private static final String hardwareDivergenceKey = "hardwareDivergence"; private static final String osVersionKey = "osVersion"; private static final String firmwareCheckKey = "firmwareCheck"; private static final String reportsKey = "reports"; private static final String modelIdKey = "modelId"; private static final String flavorKey = "flavor"; private static final String tenantIdKey = "tenantId"; private static final String applicationIdKey = "applicationId"; private static final String instanceIdKey = "instanceId"; private static final String serviceIdKey = "serviceId"; private static final String restartGenerationKey = "restartGeneration"; private static final String currentRestartGenerationKey = "currentRestartGeneration"; private static final String removableKey = "removable"; private static final String wantedVespaVersionKey = "wantedVespaVersion"; private static final String historyEventTypeKey = "type"; private static final String atKey = "at"; 
private static final String agentKey = "agent"; public NodeSerializer(NodeFlavors flavors) { this.flavors = flavors; } public byte[] toJson(Node node) { try { Slime slime = new Slime(); toSlime(node, slime.setObject()); return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new RuntimeException("Serialization of " + node + " to json failed", e); } } private void toSlime(Allocation allocation, Cursor object) { object.setString(tenantIdKey, allocation.owner().tenant().value()); object.setString(applicationIdKey, allocation.owner().application().value()); object.setString(instanceIdKey, allocation.owner().instance().value()); object.setString(serviceIdKey, allocation.membership().stringValue()); object.setLong(restartGenerationKey, allocation.restartGeneration().wanted()); object.setLong(currentRestartGenerationKey, allocation.restartGeneration().current()); object.setBool(removableKey, allocation.isRemovable()); object.setString(wantedVespaVersionKey, allocation.membership().cluster().vespaVersion().toString()); } private void toSlime(History history, Cursor array) { for (History.Event event : history.events()) toSlime(event, array.addObject()); } private void toSlime(History.Event event, Cursor object) { object.setString(historyEventTypeKey, toString(event.type())); object.setLong(atKey, event.at().toEpochMilli()); object.setString(agentKey, toString(event.agent())); } private void toSlime(Set<String> ipAddresses, Cursor array, UnaryOperator<Set<String>> validator) { validator.apply(ipAddresses).stream().sorted(IP.naturalOrder).forEach(array::addString); } public Node fromJson(Node.State state, byte[] data) { return nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get()); } private Node nodeFromSlime(Node.State state, Inspector object) { return new Node(object.field(idKey).asString(), ipAddressesFromSlime(object, ipAddressesKey), ipAddressesFromSlime(object, ipAddressPoolKey), object.field(hostnameKey).asString(), parentHostnameFromSlime(object), 
flavorFromSlime(object), statusFromSlime(object), state, allocationFromSlime(object.field(instanceKey)), historyFromSlime(object.field(historyKey)), nodeTypeFromString(object.field(nodeTypeKey).asString()), Reports.fromSlime(object.field(reportsKey)), modelIdFromSlime(object)); } private Status statusFromSlime(Inspector object) { return new Status(generationFromSlime(object, rebootGenerationKey, currentRebootGenerationKey), versionFromSlime(object.field(vespaVersionKey)), (int)object.field(failCountKey).asLong(), hardwareFailureDescriptionFromSlime(object), object.field(wantToRetireKey).asBool(), object.field(wantToDeprovisionKey).asBool(), removeQuotedNulls(hardwareDivergenceFromSlime(object)), versionFromSlime(object.field(osVersionKey)), instantFromSlime(object.field(firmwareCheckKey))); } private Flavor flavorFromSlime(Inspector object) { return flavors.getFlavorOrThrow(object.field(flavorKey).asString()); } private Optional<Allocation> allocationFromSlime(Inspector object) { if ( !
object.valid()) return Optional.empty(); return Optional.of(new Allocation(applicationIdFromSlime(object), clusterMembershipFromSlime(object), generationFromSlime(object, restartGenerationKey, currentRestartGenerationKey), object.field(removableKey).asBool())); } private ApplicationId applicationIdFromSlime(Inspector object) { return ApplicationId.from(TenantName.from(object.field(tenantIdKey).asString()), ApplicationName.from(object.field(applicationIdKey).asString()), InstanceName.from(object.field(instanceIdKey).asString())); } private History historyFromSlime(Inspector array) { List<History.Event> events = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> { History.Event event = eventFromSlime(item); if (event != null) events.add(event); }); return new History(events); } private History.Event eventFromSlime(Inspector object) { History.Event.Type type = eventTypeFromString(object.field(historyEventTypeKey).asString()); if (type == null) return null; Instant at = Instant.ofEpochMilli(object.field(atKey).asLong()); Agent agent = eventAgentFromSlime(object.field(agentKey)); return new History.Event(type, agent, at); } private Generation generationFromSlime(Inspector object, String wantedField, String currentField) { Inspector current = object.field(currentField); return new Generation(object.field(wantedField).asLong(), current.asLong()); } private ClusterMembership clusterMembershipFromSlime(Inspector object) { return ClusterMembership.from(object.field(serviceIdKey).asString(), versionFromSlime(object.field(wantedVespaVersionKey)).get()); } private Optional<Version> versionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(Version.fromString(object.asString())); } private Optional<Instant> instantFromSlime(Inspector object) { if ( !
object.valid()) return Optional.empty(); return Optional.of(Instant.ofEpochMilli(object.asLong())); } private Optional<String> parentHostnameFromSlime(Inspector object) { if (object.field(parentHostnameKey).valid()) return Optional.of(object.field(parentHostnameKey).asString()); else return Optional.empty(); } private Optional<String> hardwareDivergenceFromSlime(Inspector object) { if (object.field(hardwareDivergenceKey).valid()) { return Optional.of(object.field(hardwareDivergenceKey).asString()); } return Optional.empty(); } private Optional<String> removeQuotedNulls(Optional<String> value) { return value.filter(v -> !v.equals("null")); } private Set<String> ipAddressesFromSlime(Inspector object, String key) { ImmutableSet.Builder<String> ipAddresses = ImmutableSet.builder(); object.field(key).traverse((ArrayTraverser) (i, item) -> ipAddresses.add(item.asString())); return ipAddresses.build(); } private Optional<String> hardwareFailureDescriptionFromSlime(Inspector object) { if (object.field(hardwareFailureKey).valid()) { return Optional.of(object.field(hardwareFailureKey).asString()); } return Optional.empty(); } private Optional<String> modelIdFromSlime(Inspector object) { if (object.field(modelIdKey).valid()) { return Optional.of(object.field(modelIdKey).asString()); } return Optional.empty(); } /** Returns the event type, or null if this event type should be ignored */ private History.Event.Type eventTypeFromString(String eventTypeString) { switch (eventTypeString) { case "provisioned" : return History.Event.Type.provisioned; case "readied" : return History.Event.Type.readied; case "reserved" : return History.Event.Type.reserved; case "activated" : return History.Event.Type.activated; case "wantToRetire": return History.Event.Type.wantToRetire; case "retired" : return History.Event.Type.retired; case "deactivated" : return History.Event.Type.deactivated; case "parked" : return History.Event.Type.parked; case "failed" : return History.Event.Type.failed; case 
"deallocated" : return History.Event.Type.deallocated; case "down" : return History.Event.Type.down; case "requested" : return History.Event.Type.requested; case "rebooted" : return History.Event.Type.rebooted; } throw new IllegalArgumentException("Unknown node event type '" + eventTypeString + "'"); } private String toString(History.Event.Type nodeEventType) { switch (nodeEventType) { case provisioned : return "provisioned"; case readied : return "readied"; case reserved : return "reserved"; case activated : return "activated"; case wantToRetire: return "wantToRetire"; case retired : return "retired"; case deactivated : return "deactivated"; case parked : return "parked"; case failed : return "failed"; case deallocated : return "deallocated"; case down : return "down"; case requested: return "requested"; case rebooted: return "rebooted"; } throw new IllegalArgumentException("Serialized form of '" + nodeEventType + "' not defined"); } private Agent eventAgentFromSlime(Inspector eventAgentField) { if ( !
eventAgentField.valid()) return Agent.system; switch (eventAgentField.asString()) { case "application" : return Agent.application; case "system" : return Agent.system; case "operator" : return Agent.operator; case "NodeRetirer" : return Agent.NodeRetirer; } throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'"); } private String toString(Agent agent) { switch (agent) { case application : return "application"; case system : return "system"; case operator : return "operator"; case NodeRetirer : return "NodeRetirer"; } throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined"); } static NodeType nodeTypeFromString(String typeString) { switch (typeString) { case "tenant": return NodeType.tenant; case "host": return NodeType.host; case "proxy": return NodeType.proxy; case "proxyhost": return NodeType.proxyhost; case "config": return NodeType.config; case "confighost": return NodeType.confighost; case "controller": return NodeType.controller; case "controllerhost": return NodeType.controllerhost; default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'"); } } static String toString(NodeType type) { switch (type) { case tenant: return "tenant"; case host: return "host"; case proxy: return "proxy"; case proxyhost: return "proxyhost"; case config: return "config"; case confighost: return "confighost"; case controller: return "controller"; case controllerhost: return "controllerhost"; } throw new IllegalArgumentException("Serialized form of '" + type + "' not defined"); } }
I wouldn't expect a cache to always be fully populated. Pre-populating it would cause all historic as well as all relevant data to be read, since there is no GC. `getValidCache()` is called only once, with potentially _many_ `computeIfAbsent(...)` calls, in `getHostStatusByApplication`.
/**
 * Returns the status of the given host within the given application: ALLOWED_TO_BE_DOWN
 * when it is in the application's cached hosts-allowed-down set, NO_REMARKS otherwise.
 * A cache miss for the application loads the set via hostsDownFor.
 */
public HostStatus getHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName) {
    Set<HostName> hostsAllowedDown =
            getValidCache().computeIfAbsent(applicationInstanceReference, this::hostsDownFor);
    if (hostsAllowedDown.contains(hostName)) {
        return HostStatus.ALLOWED_TO_BE_DOWN;
    }
    return HostStatus.NO_REMARKS;
}
return getValidCache().computeIfAbsent(applicationInstanceReference, this::hostsDownFor)
/**
 * Returns ALLOWED_TO_BE_DOWN when the host is in the application's hosts-allowed-down
 * set, NO_REMARKS otherwise. The set is served from the cache and computed on demand
 * via hostsDownFor on a miss.
 */
public HostStatus getHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName) { return getValidCache().computeIfAbsent(applicationInstanceReference, this::hostsDownFor) .contains(hostName) ? HostStatus.ALLOWED_TO_BE_DOWN : HostStatus.NO_REMARKS; }
/**
 * StatusService backed by ZooKeeper (via Curator): suspended-host and application-status
 * flags are stored as znodes, and a shared CuratorCounter is used to invalidate a local
 * in-memory cache of the hosts allowed to be down per application.
 */
class ZookeeperStatusService implements StatusService { private static final Logger log = Logger.getLogger(ZookeeperStatusService.class.getName()); final static String HOST_STATUS_BASE_PATH = "/vespa/host-status-service"; final static String APPLICATION_STATUS_BASE_PATH = "/vespa/application-status-service"; final static String COUNTER_PATH = "/vespa/cache-counter"; private final Curator curator; private final CuratorCounter counter; /** A cache of hosts allowed to be down. Access only through {@link #getValidCache()}. */ private final Map<ApplicationInstanceReference, Set<HostName>> hostsDown; private volatile long cacheRefreshedAt; @Inject public ZookeeperStatusService(@Component Curator curator) { this.curator = curator; this.counter = new CuratorCounter(curator, COUNTER_PATH); this.cacheRefreshedAt = counter.get(); this.hostsDown = new ConcurrentHashMap<>(); } @Override public Set<ApplicationInstanceReference> getAllSuspendedApplications() { try { Set<ApplicationInstanceReference> resultSet = new HashSet<>(); Stat stat = curator.framework().checkExists().forPath(APPLICATION_STATUS_BASE_PATH); if (stat == null) return resultSet; for (String appRefStr : curator.framework().getChildren().forPath(APPLICATION_STATUS_BASE_PATH)) { ApplicationInstanceReference appRef = OrchestratorUtil.parseAppInstanceReference(appRefStr); resultSet.add(appRef); } return resultSet; } catch (Exception e) { log.log(LogLevel.DEBUG, "Something went wrong while listing out applications in suspend.", e); throw new RuntimeException(e); } } /** * Cache is checked for freshness when this mapping is created, and may be invalidated again later * by other users of the cache. Since this function is backed by the cache, any such invalidations * will be reflected in the returned mapping; all users of the cache collaborate in repopulating it. 
*/ @Override public Function<ApplicationInstanceReference, Set<HostName>> getSuspendedHostsByApplication() { Map<ApplicationInstanceReference, Set<HostName>> suspendedHostsByApplication = getValidCache(); return application -> suspendedHostsByApplication.computeIfAbsent(application, this::hostsDownFor); } /** * 1) locks the status service for an application instance. * 2) fails all operations in this thread when the session is lost, * since session loss might cause the lock to be lost. * Since it only fails operations in this thread, * all operations depending on a lock, including the locking itself, must be done in this thread. * Note that since it is the thread that fails, all status operations in this thread will fail * even if they're not supposed to be guarded by this lock * (i.e. the request is for another applicationInstanceReference) */ @Override public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( OrchestratorContext context, ApplicationInstanceReference applicationInstanceReference) { Duration duration = context.getTimeLeft(); String lockPath = applicationInstanceLock2Path(applicationInstanceReference); Lock lock = new Lock(lockPath, curator); lock.acquire(duration); try { return new ZkMutableStatusRegistry(lock, applicationInstanceReference, context.isProbe()); } catch (Throwable t) { lock.close(); throw t; } } private void setHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName, HostStatus status) { String path = hostAllowedDownPath(applicationInstanceReference, hostName); boolean invalidate = false; try { switch (status) { case NO_REMARKS: invalidate = deleteNode_ignoreNoNodeException(path, "Host already has state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: invalidate = createNode_ignoreNodeExistsException(path, "Host already has state ALLOWED_TO_BE_DOWN, path = " + path); break; default: throw new IllegalArgumentException("Unexpected status '" + status + "'."); } } catch 
(Exception e) { invalidate = true; throw new RuntimeException(e); } finally { if (invalidate) { counter.next(); hostsDown.remove(applicationInstanceReference); } } } private boolean deleteNode_ignoreNoNodeException(String path, String debugLogMessageIfNotExists) throws Exception { try { curator.framework().delete().forPath(path); return true; } catch (NoNodeException e) { log.log(LogLevel.DEBUG, debugLogMessageIfNotExists, e); return false; } } private boolean createNode_ignoreNodeExistsException(String path, String debugLogMessageIfExists) throws Exception { try { curator.framework().create() .creatingParentsIfNeeded() .forPath(path); return true; } catch (NodeExistsException e) { log.log(LogLevel.DEBUG, debugLogMessageIfExists, e); return false; } } 
// NOTE(review): a stray @Override on a private method would not compile — it appears a public method body was elided here during extraction; verify against the full source.
// NOTE(review): getValidCache reads counter.get() twice; if the counter advances between the reads, cacheRefreshedAt is set to the older value. Likely benign (the next call clears again), but confirm.
@Override /** Holding an application's lock ensures the cache is up to date for that application. */ private Map<ApplicationInstanceReference, Set<HostName>> getValidCache() { long cacheGeneration = counter.get(); if (counter.get() != cacheRefreshedAt) { cacheRefreshedAt = cacheGeneration; hostsDown.clear(); } return hostsDown; } private Set<HostName> hostsDownFor(ApplicationInstanceReference application) { try { if (curator.framework().checkExists().forPath(hostsAllowedDownPath(application)) == null) return Collections.emptySet(); return curator.framework().getChildren().forPath(hostsAllowedDownPath(application)) .stream().map(HostName::new) .collect(Collectors.toUnmodifiableSet()); } catch (Exception e) { throw new RuntimeException(e); } } @Override public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationInstanceReference applicationInstanceReference) { try { Stat statOrNull = curator.framework().checkExists().forPath( applicationInstanceSuspendedPath(applicationInstanceReference)); return (statOrNull == null) ? 
ApplicationInstanceStatus.NO_REMARKS : ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } private static String applicationInstancePath(ApplicationInstanceReference applicationInstanceReference) { return HOST_STATUS_BASE_PATH + '/' + applicationInstanceReference.tenantId() + ":" + applicationInstanceReference.applicationInstanceId(); } private static String hostsAllowedDownPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/hosts-allowed-down"; } private static String applicationInstanceLock2Path(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock2"; } private String applicationInstanceSuspendedPath(ApplicationInstanceReference applicationInstanceReference) { return APPLICATION_STATUS_BASE_PATH + "/" + OrchestratorUtil.toRestApiFormat(applicationInstanceReference); } private static String hostAllowedDownPath(ApplicationInstanceReference applicationInstanceReference, HostName hostname) { return hostsAllowedDownPath(applicationInstanceReference) + '/' + hostname.s(); } private class ZkMutableStatusRegistry implements MutableStatusRegistry { private final Lock lock; private final ApplicationInstanceReference applicationInstanceReference; private final boolean probe; public ZkMutableStatusRegistry(Lock lock, ApplicationInstanceReference applicationInstanceReference, boolean probe) { this.lock = lock; this.applicationInstanceReference = applicationInstanceReference; this.probe = probe; } @Override public void setHostState(final HostName hostName, final HostStatus status) { if (probe) return; log.log(LogLevel.INFO, "Setting host " + hostName + " to status " + status); setHostStatus(applicationInstanceReference, hostName, status); } @Override public void setApplicationInstanceStatus(ApplicationInstanceStatus applicationInstanceStatus) { if (probe) return; 
log.log(LogLevel.INFO, "Setting app " + applicationInstanceReference.asString() + " to status " + applicationInstanceStatus); String path = applicationInstanceSuspendedPath(applicationInstanceReference); try { switch (applicationInstanceStatus) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path, "Instance is already in state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Instance is already in state ALLOWED_TO_BE_DOWN, path = " + path); break; } } catch (Exception e) { throw new RuntimeException(e); } } @Override public void close() { try { lock.close(); } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Failed to close application lock for " + ZookeeperStatusService.class.getSimpleName() + ", will ignore and continue", e); } } } }
class ZookeeperStatusService implements StatusService { private static final Logger log = Logger.getLogger(ZookeeperStatusService.class.getName()); final static String HOST_STATUS_BASE_PATH = "/vespa/host-status-service"; final static String APPLICATION_STATUS_BASE_PATH = "/vespa/application-status-service"; final static String HOST_STATUS_CACHE_COUNTER_PATH = "/vespa/host-status-service-cache-counter"; private final Curator curator; private final CuratorCounter counter; /** A cache of hosts allowed to be down. Access only through {@link private final Map<ApplicationInstanceReference, Set<HostName>> hostsDown; private volatile long cacheRefreshedAt; @Inject public ZookeeperStatusService(@Component Curator curator) { this.curator = curator; this.counter = new CuratorCounter(curator, HOST_STATUS_CACHE_COUNTER_PATH); this.cacheRefreshedAt = counter.get(); this.hostsDown = new ConcurrentHashMap<>(); } @Override public Set<ApplicationInstanceReference> getAllSuspendedApplications() { try { Set<ApplicationInstanceReference> resultSet = new HashSet<>(); Stat stat = curator.framework().checkExists().forPath(APPLICATION_STATUS_BASE_PATH); if (stat == null) return resultSet; for (String appRefStr : curator.framework().getChildren().forPath(APPLICATION_STATUS_BASE_PATH)) { ApplicationInstanceReference appRef = OrchestratorUtil.parseAppInstanceReference(appRefStr); resultSet.add(appRef); } return resultSet; } catch (Exception e) { log.log(LogLevel.DEBUG, "Something went wrong while listing out applications in suspend.", e); throw new RuntimeException(e); } } /** * Cache is checked for freshness when this mapping is created, and may be invalidated again later * by other users of the cache. Since this function is backed by the cache, any such invalidations * will be reflected in the returned mapping; all users of the cache collaborate in repopulating it. 
*/ @Override public Function<ApplicationInstanceReference, Set<HostName>> getSuspendedHostsByApplication() { Map<ApplicationInstanceReference, Set<HostName>> suspendedHostsByApplication = getValidCache(); return application -> suspendedHostsByApplication.computeIfAbsent(application, this::hostsDownFor); } /** * 1) locks the status service for an application instance. * 2) fails all operations in this thread when the session is lost, * since session loss might cause the lock to be lost. * Since it only fails operations in this thread, * all operations depending on a lock, including the locking itself, must be done in this thread. * Note that since it is the thread that fails, all status operations in this thread will fail * even if they're not supposed to be guarded by this lock * (i.e. the request is for another applicationInstanceReference) */ @Override public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( OrchestratorContext context, ApplicationInstanceReference applicationInstanceReference) { Duration duration = context.getTimeLeft(); String lockPath = applicationInstanceLock2Path(applicationInstanceReference); Lock lock = new Lock(lockPath, curator); lock.acquire(duration); try { return new ZkMutableStatusRegistry(lock, applicationInstanceReference, context.isProbe()); } catch (Throwable t) { lock.close(); throw t; } } private void setHostStatus(ApplicationInstanceReference applicationInstanceReference, HostName hostName, HostStatus status) { String path = hostAllowedDownPath(applicationInstanceReference, hostName); boolean invalidate = false; try { switch (status) { case NO_REMARKS: invalidate = deleteNode_ignoreNoNodeException(path, "Host already has state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: invalidate = createNode_ignoreNodeExistsException(path, "Host already has state ALLOWED_TO_BE_DOWN, path = " + path); break; default: throw new IllegalArgumentException("Unexpected status '" + status + "'."); } } catch 
(Exception e) { invalidate = true; throw new RuntimeException(e); } finally { if (invalidate) { counter.next(); hostsDown.remove(applicationInstanceReference); } } } private boolean deleteNode_ignoreNoNodeException(String path, String debugLogMessageIfNotExists) throws Exception { try { curator.framework().delete().forPath(path); return true; } catch (NoNodeException e) { log.log(LogLevel.DEBUG, debugLogMessageIfNotExists, e); return false; } } private boolean createNode_ignoreNodeExistsException(String path, String debugLogMessageIfExists) throws Exception { try { curator.framework().create() .creatingParentsIfNeeded() .forPath(path); return true; } catch (NodeExistsException e) { log.log(LogLevel.DEBUG, debugLogMessageIfExists, e); return false; } } @Override /** Holding an application's lock ensures the cache is up to date for that application. */ private Map<ApplicationInstanceReference, Set<HostName>> getValidCache() { long cacheGeneration = counter.get(); if (counter.get() != cacheRefreshedAt) { cacheRefreshedAt = cacheGeneration; hostsDown.clear(); } return hostsDown; } private Set<HostName> hostsDownFor(ApplicationInstanceReference application) { try { if (curator.framework().checkExists().forPath(hostsAllowedDownPath(application)) == null) return Collections.emptySet(); return curator.framework().getChildren().forPath(hostsAllowedDownPath(application)) .stream().map(HostName::new) .collect(Collectors.toUnmodifiableSet()); } catch (Exception e) { throw new RuntimeException(e); } } @Override public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationInstanceReference applicationInstanceReference) { try { Stat statOrNull = curator.framework().checkExists().forPath( applicationInstanceSuspendedPath(applicationInstanceReference)); return (statOrNull == null) ? 
ApplicationInstanceStatus.NO_REMARKS : ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (Exception e) { throw new RuntimeException(e); } } private static String applicationInstancePath(ApplicationInstanceReference applicationInstanceReference) { return HOST_STATUS_BASE_PATH + '/' + applicationInstanceReference.tenantId() + ":" + applicationInstanceReference.applicationInstanceId(); } private static String hostsAllowedDownPath(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/hosts-allowed-down"; } private static String applicationInstanceLock2Path(ApplicationInstanceReference applicationInstanceReference) { return applicationInstancePath(applicationInstanceReference) + "/lock2"; } private String applicationInstanceSuspendedPath(ApplicationInstanceReference applicationInstanceReference) { return APPLICATION_STATUS_BASE_PATH + "/" + OrchestratorUtil.toRestApiFormat(applicationInstanceReference); } private static String hostAllowedDownPath(ApplicationInstanceReference applicationInstanceReference, HostName hostname) { return hostsAllowedDownPath(applicationInstanceReference) + '/' + hostname.s(); } private class ZkMutableStatusRegistry implements MutableStatusRegistry { private final Lock lock; private final ApplicationInstanceReference applicationInstanceReference; private final boolean probe; public ZkMutableStatusRegistry(Lock lock, ApplicationInstanceReference applicationInstanceReference, boolean probe) { this.lock = lock; this.applicationInstanceReference = applicationInstanceReference; this.probe = probe; } @Override public ApplicationInstanceStatus getStatus() { return getApplicationInstanceStatus(applicationInstanceReference); } @Override public HostStatus getHostStatus(HostName hostName) { return ZookeeperStatusService.this.getHostStatus(applicationInstanceReference, hostName); } @Override public Set<HostName> getSuspendedHosts() { return 
getValidCache().computeIfAbsent(applicationInstanceReference, ZookeeperStatusService.this::hostsDownFor); } @Override public void setHostState(final HostName hostName, final HostStatus status) { if (probe) return; log.log(LogLevel.INFO, "Setting host " + hostName + " to status " + status); setHostStatus(applicationInstanceReference, hostName, status); } @Override public void setApplicationInstanceStatus(ApplicationInstanceStatus applicationInstanceStatus) { if (probe) return; log.log(LogLevel.INFO, "Setting app " + applicationInstanceReference.asString() + " to status " + applicationInstanceStatus); String path = applicationInstanceSuspendedPath(applicationInstanceReference); try { switch (applicationInstanceStatus) { case NO_REMARKS: deleteNode_ignoreNoNodeException(path, "Instance is already in state NO_REMARKS, path = " + path); break; case ALLOWED_TO_BE_DOWN: createNode_ignoreNodeExistsException(path, "Instance is already in state ALLOWED_TO_BE_DOWN, path = " + path); break; } } catch (Exception e) { throw new RuntimeException(e); } } @Override public void close() { try { lock.close(); } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Failed to close application lock for " + ZookeeperStatusService.class.getSimpleName() + ", will ignore and continue", e); } } } }
Consider iterating over the map's entry set instead of its key set: the final loop currently looks each value up by key, so `entrySet()` would avoid one extra map lookup per hostname.
/**
 * Reconciles the set of running node agents against the wanted set of containers.
 * Stops agents whose host is no longer wanted, starts agents for newly wanted hosts,
 * and schedules each agent's next tick, spread evenly over the configured window.
 *
 * @param containersToRun the full wanted set of node specs; one agent is kept per hostname
 */
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
    final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
            .collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));

    // Hosts we have an agent for but that are no longer wanted: stop and remove their agents.
    diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
            .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());

    // Newly wanted hosts with no agent yet: create, start and register an agent.
    diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
        NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
        naws.start();
        nodeAgentWithSchedulerByHostname.put(hostname, naws);
    });

    // Spread ticks evenly across the agents so they do not all wake up at once.
    Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1));
    Instant nextAgentStart = clock.instant();
    // Iterate the entry set so each hostname's context comes with it — avoids a second map lookup per host.
    for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
        nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart);
        nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents);
    }
}
NodeAgentContext context = nodeAgentContextsByHostname.get(hostname);
/**
 * Brings the running set of node agents in sync with {@code containersToRun}:
 * agents for vanished hosts are stopped, agents for new hosts are started, and
 * every remaining agent gets its next tick scheduled, evenly spaced by {@code spread}.
 *
 * @param containersToRun the wanted containers; keyed internally by hostname
 */
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
    final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
            .collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));

    // Stop and drop agents for hostnames that are no longer wanted.
    diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
            .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());

    // Create and start agents for hostnames that appeared in the wanted set.
    diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
        NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
        naws.start();
        nodeAgentWithSchedulerByHostname.put(hostname, naws);
    });

    // Distribute tick times uniformly over the spread window (guard against division by zero).
    Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1));
    Instant nextAgentStart = clock.instant();
    // entrySet() iteration: hostname and context in one step, no extra get() per host.
    for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
        nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart);
        nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents);
    }
}
class NodeAdminImpl implements NodeAdmin { private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class); private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5); private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3); private final ScheduledExecutorService aclScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler")); private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory; private final NodeAgentContextFactory nodeAgentContextFactory; private final Optional<AclMaintainer> aclMaintainer; private final Clock clock; private final Duration freezeTimeout; private final Duration spread; private boolean previousWantFrozen; private boolean isFrozen; private Instant startOfFreezeConvergence; private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>(); private final GaugeWrapper numberOfContainersInLoadImageState; private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent; public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, NodeAgentContextFactory nodeAgentContextFactory, Optional<AclMaintainer> aclMaintainer, MetricReceiverWrapper metricReceiver, Clock clock) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), nodeAgentContextFactory, aclMaintainer, metricReceiver, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD); } public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, NodeAgentContextFactory nodeAgentContextFactory, Optional<AclMaintainer> aclMaintainer, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), nodeAgentContextFactory, aclMaintainer, metricReceiver, clock, freezeTimeout, spread); } NodeAdminImpl(NodeAgentWithSchedulerFactory 
nodeAgentWithSchedulerFactory, NodeAgentContextFactory nodeAgentContextFactory, Optional<AclMaintainer> aclMaintainer, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory; this.nodeAgentContextFactory = nodeAgentContextFactory; this.aclMaintainer = aclMaintainer; this.clock = clock; this.freezeTimeout = freezeTimeout; this.spread = spread; this.previousWantFrozen = true; this.isFrozen = true; this.startOfFreezeConvergence = clock.instant(); Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading"); this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions"); } @Override @Override public void updateNodeAgentMetrics() { int numberContainersWaitingImage = 0; int numberOfNewUnhandledExceptions = 0; for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) { if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++; numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions(); nodeAgentWithScheduler.updateContainerNodeMetrics(); } numberOfContainersInLoadImageState.sample(numberContainersWaitingImage); numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions); } @Override public boolean setFrozen(boolean wantFrozen) { if (wantFrozen != previousWantFrozen) { if (wantFrozen) { this.startOfFreezeConvergence = clock.instant(); } else { this.startOfFreezeConvergence = null; } previousWantFrozen = wantFrozen; } boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream() .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout)) 
.count() == 0; if (wantFrozen) { if (allNodeAgentsConverged) isFrozen = true; } else isFrozen = false; return allNodeAgentsConverged; } @Override public boolean isFrozen() { return isFrozen; } @Override public Duration subsystemFreezeDuration() { if (startOfFreezeConvergence == null) { return Duration.ofSeconds(0); } else { return Duration.between(startOfFreezeConvergence, clock.instant()); } } @Override public void stopNodeAgentServices(List<String> hostnames) { hostnames.parallelStream() .filter(nodeAgentWithSchedulerByHostname::containsKey) .map(nodeAgentWithSchedulerByHostname::get) .forEach(nodeAgent -> { nodeAgent.suspend(); nodeAgent.stopServices(); }); } @Override public void start() { aclMaintainer.ifPresent(maintainer -> { int delay = 120; aclScheduler.scheduleWithFixedDelay(() -> { if (!isFrozen()) maintainer.converge(); }, 30, delay, TimeUnit.SECONDS); }); } @Override public void stop() { aclScheduler.shutdown(); nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop); do { try { aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown"); } } while (!aclScheduler.isTerminated()); } private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) { final HashSet<T> result = new HashSet<>(minuend); result.removeAll(subtrahend); return result; } static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler { private final NodeAgent nodeAgent; private final NodeAgentScheduler nodeAgentScheduler; private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) { this.nodeAgent = nodeAgent; this.nodeAgentScheduler = nodeAgentScheduler; } @Override public void stopServices() { nodeAgent.stopServices(); } @Override public void suspend() { nodeAgent.suspend(); } @Override public void start() { nodeAgent.start(); } @Override public void stop() { 
nodeAgent.stop(); } @Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); } @Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); } @Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); } @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); } @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); } } @FunctionalInterface interface NodeAgentWithSchedulerFactory { NodeAgentWithScheduler create(NodeAgentContext context); } private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) { NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context); NodeAgent nodeAgent = nodeAgentFactory.create(contextManager); return new NodeAgentWithScheduler(nodeAgent, contextManager); } }
class NodeAdminImpl implements NodeAdmin { private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class); private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5); private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3); private final ScheduledExecutorService aclScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler")); private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory; private final NodeAgentContextFactory nodeAgentContextFactory; private final Optional<AclMaintainer> aclMaintainer; private final Clock clock; private final Duration freezeTimeout; private final Duration spread; private boolean previousWantFrozen; private boolean isFrozen; private Instant startOfFreezeConvergence; private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>(); private final GaugeWrapper numberOfContainersInLoadImageState; private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent; public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, NodeAgentContextFactory nodeAgentContextFactory, Optional<AclMaintainer> aclMaintainer, MetricReceiverWrapper metricReceiver, Clock clock) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), nodeAgentContextFactory, aclMaintainer, metricReceiver, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD); } public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, NodeAgentContextFactory nodeAgentContextFactory, Optional<AclMaintainer> aclMaintainer, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), nodeAgentContextFactory, aclMaintainer, metricReceiver, clock, freezeTimeout, spread); } NodeAdminImpl(NodeAgentWithSchedulerFactory 
nodeAgentWithSchedulerFactory, NodeAgentContextFactory nodeAgentContextFactory, Optional<AclMaintainer> aclMaintainer, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory; this.nodeAgentContextFactory = nodeAgentContextFactory; this.aclMaintainer = aclMaintainer; this.clock = clock; this.freezeTimeout = freezeTimeout; this.spread = spread; this.previousWantFrozen = true; this.isFrozen = true; this.startOfFreezeConvergence = clock.instant(); Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading"); this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions"); } @Override @Override public void updateNodeAgentMetrics() { int numberContainersWaitingImage = 0; int numberOfNewUnhandledExceptions = 0; for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) { if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++; numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions(); nodeAgentWithScheduler.updateContainerNodeMetrics(); } numberOfContainersInLoadImageState.sample(numberContainersWaitingImage); numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions); } @Override public boolean setFrozen(boolean wantFrozen) { if (wantFrozen != previousWantFrozen) { if (wantFrozen) { this.startOfFreezeConvergence = clock.instant(); } else { this.startOfFreezeConvergence = null; } previousWantFrozen = wantFrozen; } boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream() .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout)) 
.count() == 0; if (wantFrozen) { if (allNodeAgentsConverged) isFrozen = true; } else isFrozen = false; return allNodeAgentsConverged; } @Override public boolean isFrozen() { return isFrozen; } @Override public Duration subsystemFreezeDuration() { if (startOfFreezeConvergence == null) { return Duration.ofSeconds(0); } else { return Duration.between(startOfFreezeConvergence, clock.instant()); } } @Override public void stopNodeAgentServices(List<String> hostnames) { hostnames.parallelStream() .filter(nodeAgentWithSchedulerByHostname::containsKey) .map(nodeAgentWithSchedulerByHostname::get) .forEach(nodeAgent -> { nodeAgent.suspend(); nodeAgent.stopServices(); }); } @Override public void start() { aclMaintainer.ifPresent(maintainer -> { int delay = 120; aclScheduler.scheduleWithFixedDelay(() -> { if (!isFrozen()) maintainer.converge(); }, 30, delay, TimeUnit.SECONDS); }); } @Override public void stop() { aclScheduler.shutdown(); nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop); do { try { aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown"); } } while (!aclScheduler.isTerminated()); } private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) { final HashSet<T> result = new HashSet<>(minuend); result.removeAll(subtrahend); return result; } static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler { private final NodeAgent nodeAgent; private final NodeAgentScheduler nodeAgentScheduler; private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) { this.nodeAgent = nodeAgent; this.nodeAgentScheduler = nodeAgentScheduler; } @Override public void stopServices() { nodeAgent.stopServices(); } @Override public void suspend() { nodeAgent.suspend(); } @Override public void start() { nodeAgent.start(); } @Override public void stop() { 
nodeAgent.stop(); } @Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); } @Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); } @Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); } @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); } @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); } } @FunctionalInterface interface NodeAgentWithSchedulerFactory { NodeAgentWithScheduler create(NodeAgentContext context); } private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) { NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context); NodeAgent nodeAgent = nodeAgentFactory.create(contextManager); return new NodeAgentWithScheduler(nodeAgent, contextManager); } }
Done in #8456
/**
 * Reconciles node agents with the wanted container set: stops agents for removed
 * hosts, starts agents for added hosts, and staggers each surviving agent's next
 * tick across the configured spread interval.
 *
 * @param containersToRun wanted node specs, one per hostname
 */
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
    final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
            .collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));

    // Remove and stop agents whose hostname dropped out of the wanted set.
    diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
            .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());

    // Start a new agent for every hostname that has no agent yet.
    diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
        NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
        naws.start();
        nodeAgentWithSchedulerByHostname.put(hostname, naws);
    });

    // Evenly space agent ticks; Math.max avoids dividing by zero for 0 or 1 agents.
    Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1));
    Instant nextAgentStart = clock.instant();
    // Walk the entry set so the context is read together with its key, saving a lookup.
    for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
        nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart);
        nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents);
    }
}
NodeAgentContext context = nodeAgentContextsByHostname.get(hostname);
/**
 * Updates the agent registry to match the wanted containers: stale agents are
 * stopped, missing agents are created and started, and all agents have their
 * next tick scheduled with an even delay between consecutive agents.
 *
 * @param containersToRun the complete wanted set of containers
 */
public void refreshContainersToRun(List<NodeSpec> containersToRun) {
    final Map<String, NodeAgentContext> nodeAgentContextsByHostname = containersToRun.stream()
            .collect(Collectors.toMap(NodeSpec::getHostname, nodeAgentContextFactory::create));

    // Agents whose host is gone from the wanted set are stopped and unregistered.
    diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
            .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());

    // Hosts without an agent get a freshly created, started, registered agent.
    diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
        NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
        naws.start();
        nodeAgentWithSchedulerByHostname.put(hostname, naws);
    });

    // Spread ticks over the window; denominator is clamped to at least 1.
    Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1));
    Instant nextAgentStart = clock.instant();
    // Entry-set iteration removes the per-hostname value lookup done by keySet() + get().
    for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
        nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart);
        nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents);
    }
}
class NodeAdminImpl implements NodeAdmin { private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class); private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5); private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3); private final ScheduledExecutorService aclScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler")); private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory; private final NodeAgentContextFactory nodeAgentContextFactory; private final Optional<AclMaintainer> aclMaintainer; private final Clock clock; private final Duration freezeTimeout; private final Duration spread; private boolean previousWantFrozen; private boolean isFrozen; private Instant startOfFreezeConvergence; private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>(); private final GaugeWrapper numberOfContainersInLoadImageState; private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent; public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, NodeAgentContextFactory nodeAgentContextFactory, Optional<AclMaintainer> aclMaintainer, MetricReceiverWrapper metricReceiver, Clock clock) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), nodeAgentContextFactory, aclMaintainer, metricReceiver, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD); } public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, NodeAgentContextFactory nodeAgentContextFactory, Optional<AclMaintainer> aclMaintainer, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), nodeAgentContextFactory, aclMaintainer, metricReceiver, clock, freezeTimeout, spread); } NodeAdminImpl(NodeAgentWithSchedulerFactory 
nodeAgentWithSchedulerFactory, NodeAgentContextFactory nodeAgentContextFactory, Optional<AclMaintainer> aclMaintainer, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory; this.nodeAgentContextFactory = nodeAgentContextFactory; this.aclMaintainer = aclMaintainer; this.clock = clock; this.freezeTimeout = freezeTimeout; this.spread = spread; this.previousWantFrozen = true; this.isFrozen = true; this.startOfFreezeConvergence = clock.instant(); Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading"); this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions"); } @Override @Override public void updateNodeAgentMetrics() { int numberContainersWaitingImage = 0; int numberOfNewUnhandledExceptions = 0; for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) { if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++; numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions(); nodeAgentWithScheduler.updateContainerNodeMetrics(); } numberOfContainersInLoadImageState.sample(numberContainersWaitingImage); numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions); } @Override public boolean setFrozen(boolean wantFrozen) { if (wantFrozen != previousWantFrozen) { if (wantFrozen) { this.startOfFreezeConvergence = clock.instant(); } else { this.startOfFreezeConvergence = null; } previousWantFrozen = wantFrozen; } boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream() .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout)) 
.count() == 0; if (wantFrozen) { if (allNodeAgentsConverged) isFrozen = true; } else isFrozen = false; return allNodeAgentsConverged; } @Override public boolean isFrozen() { return isFrozen; } @Override public Duration subsystemFreezeDuration() { if (startOfFreezeConvergence == null) { return Duration.ofSeconds(0); } else { return Duration.between(startOfFreezeConvergence, clock.instant()); } } @Override public void stopNodeAgentServices(List<String> hostnames) { hostnames.parallelStream() .filter(nodeAgentWithSchedulerByHostname::containsKey) .map(nodeAgentWithSchedulerByHostname::get) .forEach(nodeAgent -> { nodeAgent.suspend(); nodeAgent.stopServices(); }); } @Override public void start() { aclMaintainer.ifPresent(maintainer -> { int delay = 120; aclScheduler.scheduleWithFixedDelay(() -> { if (!isFrozen()) maintainer.converge(); }, 30, delay, TimeUnit.SECONDS); }); } @Override public void stop() { aclScheduler.shutdown(); nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop); do { try { aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown"); } } while (!aclScheduler.isTerminated()); } private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) { final HashSet<T> result = new HashSet<>(minuend); result.removeAll(subtrahend); return result; } static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler { private final NodeAgent nodeAgent; private final NodeAgentScheduler nodeAgentScheduler; private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) { this.nodeAgent = nodeAgent; this.nodeAgentScheduler = nodeAgentScheduler; } @Override public void stopServices() { nodeAgent.stopServices(); } @Override public void suspend() { nodeAgent.suspend(); } @Override public void start() { nodeAgent.start(); } @Override public void stop() { 
nodeAgent.stop(); } @Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); } @Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); } @Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); } @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); } @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); } } @FunctionalInterface interface NodeAgentWithSchedulerFactory { NodeAgentWithScheduler create(NodeAgentContext context); } private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) { NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context); NodeAgent nodeAgent = nodeAgentFactory.create(contextManager); return new NodeAgentWithScheduler(nodeAgent, contextManager); } }
/**
 * Administers the set of {@link NodeAgent}s running on this host: starts and stops agents,
 * tracks and propagates freeze state, aggregates per-agent metrics, and schedules periodic
 * ACL maintenance when an {@link AclMaintainer} is configured.
 */
class NodeAdminImpl implements NodeAdmin {
    private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class);

    private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5);
    private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3);

    private final ScheduledExecutorService aclScheduler =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("aclscheduler"));

    private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory;
    private final NodeAgentContextFactory nodeAgentContextFactory;
    private final Optional<AclMaintainer> aclMaintainer;
    private final Clock clock;
    private final Duration freezeTimeout;
    private final Duration spread;
    private boolean previousWantFrozen;
    private boolean isFrozen;
    // When the current convergence towards frozen started, or null when not converging towards frozen.
    private Instant startOfFreezeConvergence;

    private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();

    private final GaugeWrapper numberOfContainersInLoadImageState;
    private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent;

    public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, NodeAgentContextFactory nodeAgentContextFactory,
                         Optional<AclMaintainer> aclMaintainer, MetricReceiverWrapper metricReceiver, Clock clock) {
        this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
                nodeAgentContextFactory, aclMaintainer, metricReceiver, clock,
                NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD);
    }

    public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, NodeAgentContextFactory nodeAgentContextFactory,
                         Optional<AclMaintainer> aclMaintainer, MetricReceiverWrapper metricReceiver, Clock clock,
                         Duration freezeTimeout, Duration spread) {
        this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext),
                nodeAgentContextFactory, aclMaintainer, metricReceiver, clock, freezeTimeout, spread);
    }

    NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory,
                  NodeAgentContextFactory nodeAgentContextFactory, Optional<AclMaintainer> aclMaintainer,
                  MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) {
        this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory;
        this.nodeAgentContextFactory = nodeAgentContextFactory;
        this.aclMaintainer = aclMaintainer;
        this.clock = clock;
        this.freezeTimeout = freezeTimeout;
        this.spread = spread;
        // Start out frozen until told otherwise, so agents do not act before the first converge.
        this.previousWantFrozen = true;
        this.isFrozen = true;
        this.startOfFreezeConvergence = clock.instant();

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
        this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
    }

    @Override  // BUG FIX: the annotation was written twice (@Override @Override), which does not compile
    public void updateNodeAgentMetrics() {
        int numberContainersWaitingImage = 0;
        int numberOfNewUnhandledExceptions = 0;

        for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) {
            if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++;
            numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions();
            nodeAgentWithScheduler.updateContainerNodeMetrics();
        }

        numberOfContainersInLoadImageState.sample(numberContainersWaitingImage);
        numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions);
    }

    @Override
    public boolean setFrozen(boolean wantFrozen) {
        if (wantFrozen != previousWantFrozen) {
            // Note when we started converging towards frozen; used by subsystemFreezeDuration().
            if (wantFrozen) {
                this.startOfFreezeConvergence = clock.instant();
            } else {
                this.startOfFreezeConvergence = null;
            }
            previousWantFrozen = wantFrozen;
        }

        // filter().count() == 0 rather than allMatch()/noneMatch() on purpose: setFrozen() has
        // side effects on each agent and must be invoked on all of them, so no short-circuiting.
        boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
                .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout))
                .count() == 0;

        if (wantFrozen) {
            if (allNodeAgentsConverged) isFrozen = true;
        } else isFrozen = false;

        return allNodeAgentsConverged;
    }

    @Override
    public boolean isFrozen() {
        return isFrozen;
    }

    @Override
    public Duration subsystemFreezeDuration() {
        if (startOfFreezeConvergence == null) {
            return Duration.ofSeconds(0);
        } else {
            return Duration.between(startOfFreezeConvergence, clock.instant());
        }
    }

    @Override
    public void stopNodeAgentServices(List<String> hostnames) {
        // Suspending and stopping an agent's services takes a while, so do them in parallel.
        hostnames.parallelStream()
                .filter(nodeAgentWithSchedulerByHostname::containsKey)
                .map(nodeAgentWithSchedulerByHostname::get)
                .forEach(nodeAgent -> {
                    nodeAgent.suspend();
                    nodeAgent.stopServices();
                });
    }

    @Override
    public void start() {
        aclMaintainer.ifPresent(maintainer -> {
            int delay = 120;
            aclScheduler.scheduleWithFixedDelay(() -> {
                if (!isFrozen()) maintainer.converge();
            }, 30, delay, TimeUnit.SECONDS);
        });
    }

    @Override
    public void stop() {
        aclScheduler.shutdown();

        nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop);

        // Keep waiting until the scheduler has actually terminated, even if we are interrupted.
        do {
            try {
                aclScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.info("Was interrupted while waiting for metricsScheduler and aclScheduler to shutdown");
            }
        } while (!aclScheduler.isTerminated());
    }

    /** Returns the set difference {@code minuend - subtrahend}. */
    private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) {
        final HashSet<T> result = new HashSet<>(minuend);
        result.removeAll(subtrahend);
        return result;
    }

    /** Pairs a {@link NodeAgent} with its {@link NodeAgentScheduler}, delegating to each. */
    static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler {
        private final NodeAgent nodeAgent;
        private final NodeAgentScheduler nodeAgentScheduler;

        private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) {
            this.nodeAgent = nodeAgent;
            this.nodeAgentScheduler = nodeAgentScheduler;
        }

        @Override public void stopServices() { nodeAgent.stopServices(); }
        @Override public void suspend() { nodeAgent.suspend(); }
        @Override public void start() { nodeAgent.start(); }
        @Override public void stop() { nodeAgent.stop(); }
        @Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); }
        @Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); }
        @Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); }

        @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); }
        @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); }
    }

    @FunctionalInterface
    interface NodeAgentWithSchedulerFactory {
        NodeAgentWithScheduler create(NodeAgentContext context);
    }

    private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) {
        NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context);
        NodeAgent nodeAgent = nodeAgentFactory.create(contextManager);
        return new NodeAgentWithScheduler(nodeAgent, contextManager);
    }
}
Aside from a bug in node-repo, the only way this can happen is if a container was de-allocated between the `getNodes()` call and the `getAcls()` call. In that case, using an empty `Acl` is fine, since that container will be removed on the next tick anyway. In the reverse case (a container allocated between those two calls), the container will be created on the next tick, no different from the current behavior.
/**
 * Fetches the current node specs and ACLs for this host from the node repository and
 * refreshes NodeAdmin with the resulting set of NodeAgent contexts. If a node has no ACL
 * (e.g. it was deallocated between the two node-repo calls), an empty ACL is used; such a
 * container is removed on the next tick anyway. Failures are logged, never propagated.
 */
void adjustNodeAgentsToRunFromNodeRepository() {
    try {
        Map<String, NodeSpec> specByHostname = nodeRepository.getNodes(hostHostname).stream()
                .collect(Collectors.toMap(NodeSpec::getHostname, Function.identity()));

        // Reuse the cached ACLs only if they cover every current node; otherwise refresh the cache.
        Map<String, Acl> aclsByHostname = Optional.of(cachedAclSupplier.get())
                .filter(acls -> acls.keySet().containsAll(specByHostname.keySet()))
                .orElseGet(cachedAclSupplier::invalidateAndGet);

        Set<NodeAgentContext> contexts = specByHostname.keySet().stream()
                .map(hostname -> nodeAgentContextFactory.create(
                        specByHostname.get(hostname),
                        aclsByHostname.getOrDefault(hostname, Acl.EMPTY)))
                .collect(Collectors.toSet());

        nodeAdmin.refreshContainersToRun(contexts);
    } catch (RuntimeException e) {
        log.log(LogLevel.WARNING, "Failed to update which containers should be running", e);
    }
}
aclByHostname.getOrDefault(hostname, Acl.EMPTY)))
/**
 * Fetches the current node specs and ACLs for this host from the node repository and
 * refreshes NodeAdmin with the resulting set of NodeAgent contexts. If a node has no ACL
 * (e.g. it was deallocated between the two node-repo calls), an empty ACL is used; such a
 * container is removed on the next tick anyway. Failures are logged, never propagated.
 */
void adjustNodeAgentsToRunFromNodeRepository() {
    try {
        Map<String, NodeSpec> specByHostname = nodeRepository.getNodes(hostHostname).stream()
                .collect(Collectors.toMap(NodeSpec::getHostname, Function.identity()));

        // Reuse the cached ACLs only if they cover every current node; otherwise refresh the cache.
        Map<String, Acl> aclsByHostname = Optional.of(cachedAclSupplier.get())
                .filter(acls -> acls.keySet().containsAll(specByHostname.keySet()))
                .orElseGet(cachedAclSupplier::invalidateAndGet);

        Set<NodeAgentContext> contexts = specByHostname.keySet().stream()
                .map(hostname -> nodeAgentContextFactory.create(
                        specByHostname.get(hostname),
                        aclsByHostname.getOrDefault(hostname, Acl.EMPTY)))
                .collect(Collectors.toSet());

        nodeAdmin.refreshContainersToRun(contexts);
    } catch (RuntimeException e) {
        log.log(LogLevel.WARNING, "Failed to update which containers should be running", e);
    }
}
class NodeAdminStateUpdater { private static final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final ScheduledExecutorService metricsScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler")); private final CachedSupplier<Map<String, Acl>> cachedAclSupplier; private final NodeAgentContextFactory nodeAgentContextFactory; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final String hostHostname; public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED } private volatile State currentState = SUSPENDED_NODE_ADMIN; public NodeAdminStateUpdater( NodeAgentContextFactory nodeAgentContextFactory, NodeRepository nodeRepository, Orchestrator orchestrator, NodeAdmin nodeAdmin, HostName hostHostname, Clock clock) { this.nodeAgentContextFactory = nodeAgentContextFactory; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.hostHostname = hostHostname.value(); this.cachedAclSupplier = new CachedSupplier<>(clock, Duration.ofSeconds(115), () -> nodeRepository.getAcls(this.hostHostname)); } public void start() { nodeAdmin.start(); EnumSet<State> suspendedStates = EnumSet.of(SUSPENDED_NODE_ADMIN, SUSPENDED); metricsScheduler.scheduleAtFixedRate(() -> { try { if (suspendedStates.contains(currentState)) return; nodeAdmin.updateNodeAgentMetrics(); } catch (Throwable e) { log.log(Level.WARNING, "Metric fetcher scheduler failed", e); } }, 10, 55, TimeUnit.SECONDS); } public void stop() { metricsScheduler.shutdown(); nodeAdmin.stop(); do { try { metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { log.info("Was interrupted while waiting for metricsScheduler and shutdown"); } } while (!metricsScheduler.isTerminated()); } 
/** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ public void converge(State wantedState) { if (wantedState == RESUMED) { adjustNodeAgentsToRunFromNodeRepository(); } else if (currentState == TRANSITIONING && nodeAdmin.subsystemFreezeDuration().compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { adjustNodeAgentsToRunFromNodeRepository(); nodeAdmin.setFrozen(false); throw new ConvergenceException("Timed out trying to freeze all nodes: will force an unfrozen tick"); } if (currentState == wantedState) return; currentState = TRANSITIONING; boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen")); } boolean hostIsActiveInNR = nodeRepository.getNode(hostHostname).getState() == Node.State.active; switch (wantedState) { case RESUMED: if (hostIsActiveInNR) orchestrator.resume(hostHostname); break; case SUSPENDED_NODE_ADMIN: if (hostIsActiveInNR) orchestrator.suspend(hostHostname); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(nodesInActiveState); if (hostIsActiveInNR) nodesToSuspend.add(hostHostname); if (!nodesToSuspend.isEmpty()) { orchestrator.suspend(hostHostname, nodesToSuspend); log.info("Orchestrator allows suspension of " + nodesToSuspend); } nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); currentState = wantedState; } private List<String> getNodesInActiveState() { return nodeRepository.getNodes(hostHostname) .stream() .filter(node -> node.getState() == Node.State.active) .map(NodeSpec::getHostname) .collect(Collectors.toList()); } private static class CachedSupplier<T> implements Supplier<T> { private final Clock 
clock; private final Duration expiration; private final Supplier<T> supplier; private Instant refreshAt; private T cachedValue; private CachedSupplier(Clock clock, Duration expiration, Supplier<T> supplier) { this.clock = clock; this.expiration = expiration; this.supplier = supplier; this.refreshAt = Instant.MIN; } @Override public T get() { if (! clock.instant().isBefore(refreshAt)) { cachedValue = supplier.get(); refreshAt = clock.instant().plus(expiration); } return cachedValue; } private T invalidateAndGet() { refreshAt = Instant.MIN; return get(); } } }
class NodeAdminStateUpdater { private static final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final ScheduledExecutorService metricsScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler")); private final CachedSupplier<Map<String, Acl>> cachedAclSupplier; private final NodeAgentContextFactory nodeAgentContextFactory; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final String hostHostname; public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED } private volatile State currentState = SUSPENDED_NODE_ADMIN; public NodeAdminStateUpdater( NodeAgentContextFactory nodeAgentContextFactory, NodeRepository nodeRepository, Orchestrator orchestrator, NodeAdmin nodeAdmin, HostName hostHostname, Clock clock) { this.nodeAgentContextFactory = nodeAgentContextFactory; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.hostHostname = hostHostname.value(); this.cachedAclSupplier = new CachedSupplier<>(clock, Duration.ofSeconds(115), () -> nodeRepository.getAcls(this.hostHostname)); } public void start() { nodeAdmin.start(); EnumSet<State> suspendedStates = EnumSet.of(SUSPENDED_NODE_ADMIN, SUSPENDED); metricsScheduler.scheduleAtFixedRate(() -> { try { if (suspendedStates.contains(currentState)) return; nodeAdmin.updateNodeAgentMetrics(); } catch (Throwable e) { log.log(Level.WARNING, "Metric fetcher scheduler failed", e); } }, 10, 55, TimeUnit.SECONDS); } public void stop() { metricsScheduler.shutdown(); nodeAdmin.stop(); do { try { metricsScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { log.info("Was interrupted while waiting for metricsScheduler and shutdown"); } } while (!metricsScheduler.isTerminated()); } 
/** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ public void converge(State wantedState) { if (wantedState == RESUMED) { adjustNodeAgentsToRunFromNodeRepository(); } else if (currentState == TRANSITIONING && nodeAdmin.subsystemFreezeDuration().compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { adjustNodeAgentsToRunFromNodeRepository(); nodeAdmin.setFrozen(false); throw new ConvergenceException("Timed out trying to freeze all nodes: will force an unfrozen tick"); } if (currentState == wantedState) return; currentState = TRANSITIONING; boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen")); } boolean hostIsActiveInNR = nodeRepository.getNode(hostHostname).getState() == Node.State.active; switch (wantedState) { case RESUMED: if (hostIsActiveInNR) orchestrator.resume(hostHostname); break; case SUSPENDED_NODE_ADMIN: if (hostIsActiveInNR) orchestrator.suspend(hostHostname); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(nodesInActiveState); if (hostIsActiveInNR) nodesToSuspend.add(hostHostname); if (!nodesToSuspend.isEmpty()) { orchestrator.suspend(hostHostname, nodesToSuspend); log.info("Orchestrator allows suspension of " + nodesToSuspend); } nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); currentState = wantedState; } private List<String> getNodesInActiveState() { return nodeRepository.getNodes(hostHostname) .stream() .filter(node -> node.getState() == Node.State.active) .map(NodeSpec::getHostname) .collect(Collectors.toList()); } private static class CachedSupplier<T> implements Supplier<T> { private final Clock 
clock; private final Duration expiration; private final Supplier<T> supplier; private Instant refreshAt; private T cachedValue; private CachedSupplier(Clock clock, Duration expiration, Supplier<T> supplier) { this.clock = clock; this.expiration = expiration; this.supplier = supplier; this.refreshAt = Instant.MIN; } @Override public T get() { if (! clock.instant().isBefore(refreshAt)) { cachedValue = supplier.get(); refreshAt = clock.instant().plus(expiration); } return cachedValue; } private T invalidateAndGet() { refreshAt = Instant.MIN; return get(); } } }
Consider using `var` next time.
/**
 * Synchronizes the running NodeAgents with the given set of wanted contexts:
 * stops agents whose containers are gone, starts agents for new containers, and
 * re-schedules a tick for every agent, spread evenly over the configured interval.
 */
public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) {
    Map<String, NodeAgentContext> wantedContextByHostname = nodeAgentContexts.stream()
            .collect(Collectors.toMap(context -> context.hostname().value(), Function.identity()));

    // Stop and discard agents for containers that should no longer run.
    diff(nodeAgentWithSchedulerByHostname.keySet(), wantedContextByHostname.keySet())
            .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());

    // Create and start agents for containers that are new.
    diff(wantedContextByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet())
            .forEach(hostname -> {
                NodeAgentWithScheduler agent =
                        nodeAgentWithSchedulerFactory.create(wantedContextByHostname.get(hostname));
                agent.start();
                nodeAgentWithSchedulerByHostname.put(hostname, agent);
            });

    // Stagger the agent ticks evenly across `spread` so they do not all fire at once.
    Duration delayBetweenAgents = spread.dividedBy(Math.max(wantedContextByHostname.size() - 1, 1));
    Instant nextTickAt = clock.instant();
    for (Map.Entry<String, NodeAgentContext> entry : wantedContextByHostname.entrySet()) {
        nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextTickAt);
        nextTickAt = nextTickAt.plus(delayBetweenAgents);
    }
}
for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
/**
 * Synchronizes the running NodeAgents with the given set of wanted contexts:
 * stops agents whose containers are gone, starts agents for new containers, and
 * re-schedules a tick for every agent, spread evenly over the configured interval.
 */
public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) {
    Map<String, NodeAgentContext> wantedContextByHostname = nodeAgentContexts.stream()
            .collect(Collectors.toMap(context -> context.hostname().value(), Function.identity()));

    // Stop and discard agents for containers that should no longer run.
    diff(nodeAgentWithSchedulerByHostname.keySet(), wantedContextByHostname.keySet())
            .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());

    // Create and start agents for containers that are new.
    diff(wantedContextByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet())
            .forEach(hostname -> {
                NodeAgentWithScheduler agent =
                        nodeAgentWithSchedulerFactory.create(wantedContextByHostname.get(hostname));
                agent.start();
                nodeAgentWithSchedulerByHostname.put(hostname, agent);
            });

    // Stagger the agent ticks evenly across `spread` so they do not all fire at once.
    Duration delayBetweenAgents = spread.dividedBy(Math.max(wantedContextByHostname.size() - 1, 1));
    Instant nextTickAt = clock.instant();
    for (Map.Entry<String, NodeAgentContext> entry : wantedContextByHostname.entrySet()) {
        nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextTickAt);
        nextTickAt = nextTickAt.plus(delayBetweenAgents);
    }
}
class NodeAdminImpl implements NodeAdmin { private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5); private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3); private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory; private final Clock clock; private final Duration freezeTimeout; private final Duration spread; private boolean previousWantFrozen; private boolean isFrozen; private Instant startOfFreezeConvergence; private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>(); private final GaugeWrapper numberOfContainersInLoadImageState; private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent; public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, MetricReceiverWrapper metricReceiver, Clock clock) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), metricReceiver, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD); } public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), metricReceiver, clock, freezeTimeout, spread); } NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory; this.clock = clock; this.freezeTimeout = freezeTimeout; this.spread = spread; this.previousWantFrozen = true; this.isFrozen = true; this.startOfFreezeConvergence = clock.instant(); Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading"); 
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions"); } @Override @Override public void updateNodeAgentMetrics() { int numberContainersWaitingImage = 0; int numberOfNewUnhandledExceptions = 0; for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) { if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++; numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions(); nodeAgentWithScheduler.updateContainerNodeMetrics(); } numberOfContainersInLoadImageState.sample(numberContainersWaitingImage); numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions); } @Override public boolean setFrozen(boolean wantFrozen) { if (wantFrozen != previousWantFrozen) { if (wantFrozen) { this.startOfFreezeConvergence = clock.instant(); } else { this.startOfFreezeConvergence = null; } previousWantFrozen = wantFrozen; } boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream() .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout)) .count() == 0; if (wantFrozen) { if (allNodeAgentsConverged) isFrozen = true; } else isFrozen = false; return allNodeAgentsConverged; } @Override public boolean isFrozen() { return isFrozen; } @Override public Duration subsystemFreezeDuration() { if (startOfFreezeConvergence == null) { return Duration.ofSeconds(0); } else { return Duration.between(startOfFreezeConvergence, clock.instant()); } } @Override public void stopNodeAgentServices(List<String> hostnames) { hostnames.parallelStream() .filter(nodeAgentWithSchedulerByHostname::containsKey) .map(nodeAgentWithSchedulerByHostname::get) .forEach(nodeAgent -> { nodeAgent.suspend(); nodeAgent.stopServices(); }); } @Override public void start() { } @Override public void stop() { 
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop); } private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) { final HashSet<T> result = new HashSet<>(minuend); result.removeAll(subtrahend); return result; } static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler { private final NodeAgent nodeAgent; private final NodeAgentScheduler nodeAgentScheduler; private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) { this.nodeAgent = nodeAgent; this.nodeAgentScheduler = nodeAgentScheduler; } @Override public void stopServices() { nodeAgent.stopServices(); } @Override public void suspend() { nodeAgent.suspend(); } @Override public void start() { nodeAgent.start(); } @Override public void stop() { nodeAgent.stop(); } @Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); } @Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); } @Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); } @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); } @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); } } @FunctionalInterface interface NodeAgentWithSchedulerFactory { NodeAgentWithScheduler create(NodeAgentContext context); } private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) { NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context); NodeAgent nodeAgent = nodeAgentFactory.create(contextManager); return new NodeAgentWithScheduler(nodeAgent, contextManager); } }
class NodeAdminImpl implements NodeAdmin { private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5); private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3); private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory; private final Clock clock; private final Duration freezeTimeout; private final Duration spread; private boolean previousWantFrozen; private boolean isFrozen; private Instant startOfFreezeConvergence; private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>(); private final GaugeWrapper numberOfContainersInLoadImageState; private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent; public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, MetricReceiverWrapper metricReceiver, Clock clock) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), metricReceiver, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD); } public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), metricReceiver, clock, freezeTimeout, spread); } NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory; this.clock = clock; this.freezeTimeout = freezeTimeout; this.spread = spread; this.previousWantFrozen = true; this.isFrozen = true; this.startOfFreezeConvergence = clock.instant(); Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading"); 
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions"); } @Override @Override public void updateNodeAgentMetrics() { int numberContainersWaitingImage = 0; int numberOfNewUnhandledExceptions = 0; for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) { if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++; numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions(); nodeAgentWithScheduler.updateContainerNodeMetrics(); } numberOfContainersInLoadImageState.sample(numberContainersWaitingImage); numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions); } @Override public boolean setFrozen(boolean wantFrozen) { if (wantFrozen != previousWantFrozen) { if (wantFrozen) { this.startOfFreezeConvergence = clock.instant(); } else { this.startOfFreezeConvergence = null; } previousWantFrozen = wantFrozen; } boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream() .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout)) .count() == 0; if (wantFrozen) { if (allNodeAgentsConverged) isFrozen = true; } else isFrozen = false; return allNodeAgentsConverged; } @Override public boolean isFrozen() { return isFrozen; } @Override public Duration subsystemFreezeDuration() { if (startOfFreezeConvergence == null) { return Duration.ofSeconds(0); } else { return Duration.between(startOfFreezeConvergence, clock.instant()); } } @Override public void stopNodeAgentServices(List<String> hostnames) { hostnames.parallelStream() .filter(nodeAgentWithSchedulerByHostname::containsKey) .map(nodeAgentWithSchedulerByHostname::get) .forEach(nodeAgent -> { nodeAgent.suspend(); nodeAgent.stopServices(); }); } @Override public void start() { } @Override public void stop() { 
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop); } private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) { final HashSet<T> result = new HashSet<>(minuend); result.removeAll(subtrahend); return result; } static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler { private final NodeAgent nodeAgent; private final NodeAgentScheduler nodeAgentScheduler; private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) { this.nodeAgent = nodeAgent; this.nodeAgentScheduler = nodeAgentScheduler; } @Override public void stopServices() { nodeAgent.stopServices(); } @Override public void suspend() { nodeAgent.suspend(); } @Override public void start() { nodeAgent.start(); } @Override public void stop() { nodeAgent.stop(); } @Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); } @Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); } @Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); } @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); } @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); } } @FunctionalInterface interface NodeAgentWithSchedulerFactory { NodeAgentWithScheduler create(NodeAgentContext context); } private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) { NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context); NodeAgent nodeAgent = nodeAgentFactory.create(contextManager); return new NodeAgentWithScheduler(nodeAgent, contextManager); } }
I haven't seen many `var`s in our code base. Seems like you're the only user so far :1st_place_medal:
/**
 * Reconciles the set of running node agents with the given wanted contexts: stops agents
 * whose host is no longer wanted, creates and starts agents for newly appearing hosts,
 * then schedules every agent's next tick, spacing the ticks evenly across the configured
 * spread window so the agents do not all run at the same instant.
 */
public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) {
    Map<String, NodeAgentContext> contextsByHostname = nodeAgentContexts.stream()
            .collect(Collectors.toMap(context -> context.hostname().value(), Function.identity()));

    // Stop and discard agents for hosts that disappeared from the wanted set.
    for (String obsoleteHostname : diff(nodeAgentWithSchedulerByHostname.keySet(), contextsByHostname.keySet())) {
        nodeAgentWithSchedulerByHostname.remove(obsoleteHostname).stop();
    }

    // Create and start an agent for every host that is new in the wanted set.
    for (String newHostname : diff(contextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet())) {
        NodeAgentWithScheduler agent = nodeAgentWithSchedulerFactory.create(contextsByHostname.get(newHostname));
        agent.start();
        nodeAgentWithSchedulerByHostname.put(newHostname, agent);
    }

    // size - 1 gaps between ticks, clamped to at least 1 to avoid division by zero.
    Duration tickInterval = spread.dividedBy(Math.max(contextsByHostname.size() - 1, 1));
    Instant nextTick = clock.instant();
    for (Map.Entry<String, NodeAgentContext> entry : contextsByHostname.entrySet()) {
        nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextTick);
        nextTick = nextTick.plus(tickInterval);
    }
}
for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
/**
 * Brings the set of managed node agents in sync with the wanted set of contexts and
 * schedules each agent's next tick, spreading the ticks evenly over the spread window.
 */
public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) {
    // Index the wanted contexts by hostname for the set-difference and lookups below.
    Map<String, NodeAgentContext> nodeAgentContextsByHostname = nodeAgentContexts.stream()
            .collect(Collectors.toMap(nac -> nac.hostname().value(), Function.identity()));
    // Agents running for hosts that are no longer wanted: stop and remove them.
    diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
            .forEach(hostname -> nodeAgentWithSchedulerByHostname.remove(hostname).stop());
    // Hosts that are wanted but have no agent yet: create and start one.
    diff(nodeAgentContextsByHostname.keySet(), nodeAgentWithSchedulerByHostname.keySet()).forEach(hostname -> {
        NodeAgentWithScheduler naws = nodeAgentWithSchedulerFactory.create(nodeAgentContextsByHostname.get(hostname));
        naws.start();
        nodeAgentWithSchedulerByHostname.put(hostname, naws);
    });
    // Spread the next ticks evenly: size - 1 gaps between agents, clamped to at
    // least 1 so a single (or empty) agent set does not divide by zero.
    Duration timeBetweenNodeAgents = spread.dividedBy(Math.max(nodeAgentContextsByHostname.size() - 1, 1));
    Instant nextAgentStart = clock.instant();
    for (Map.Entry<String, NodeAgentContext> entry : nodeAgentContextsByHostname.entrySet()) {
        nodeAgentWithSchedulerByHostname.get(entry.getKey()).scheduleTickWith(entry.getValue(), nextAgentStart);
        nextAgentStart = nextAgentStart.plus(timeBetweenNodeAgents);
    }
}
class NodeAdminImpl implements NodeAdmin { private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5); private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3); private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory; private final Clock clock; private final Duration freezeTimeout; private final Duration spread; private boolean previousWantFrozen; private boolean isFrozen; private Instant startOfFreezeConvergence; private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>(); private final GaugeWrapper numberOfContainersInLoadImageState; private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent; public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, MetricReceiverWrapper metricReceiver, Clock clock) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), metricReceiver, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD); } public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), metricReceiver, clock, freezeTimeout, spread); } NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory; this.clock = clock; this.freezeTimeout = freezeTimeout; this.spread = spread; this.previousWantFrozen = true; this.isFrozen = true; this.startOfFreezeConvergence = clock.instant(); Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading"); 
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions"); } @Override @Override public void updateNodeAgentMetrics() { int numberContainersWaitingImage = 0; int numberOfNewUnhandledExceptions = 0; for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) { if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++; numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions(); nodeAgentWithScheduler.updateContainerNodeMetrics(); } numberOfContainersInLoadImageState.sample(numberContainersWaitingImage); numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions); } @Override public boolean setFrozen(boolean wantFrozen) { if (wantFrozen != previousWantFrozen) { if (wantFrozen) { this.startOfFreezeConvergence = clock.instant(); } else { this.startOfFreezeConvergence = null; } previousWantFrozen = wantFrozen; } boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream() .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout)) .count() == 0; if (wantFrozen) { if (allNodeAgentsConverged) isFrozen = true; } else isFrozen = false; return allNodeAgentsConverged; } @Override public boolean isFrozen() { return isFrozen; } @Override public Duration subsystemFreezeDuration() { if (startOfFreezeConvergence == null) { return Duration.ofSeconds(0); } else { return Duration.between(startOfFreezeConvergence, clock.instant()); } } @Override public void stopNodeAgentServices(List<String> hostnames) { hostnames.parallelStream() .filter(nodeAgentWithSchedulerByHostname::containsKey) .map(nodeAgentWithSchedulerByHostname::get) .forEach(nodeAgent -> { nodeAgent.suspend(); nodeAgent.stopServices(); }); } @Override public void start() { } @Override public void stop() { 
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop); } private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) { final HashSet<T> result = new HashSet<>(minuend); result.removeAll(subtrahend); return result; } static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler { private final NodeAgent nodeAgent; private final NodeAgentScheduler nodeAgentScheduler; private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) { this.nodeAgent = nodeAgent; this.nodeAgentScheduler = nodeAgentScheduler; } @Override public void stopServices() { nodeAgent.stopServices(); } @Override public void suspend() { nodeAgent.suspend(); } @Override public void start() { nodeAgent.start(); } @Override public void stop() { nodeAgent.stop(); } @Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); } @Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); } @Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); } @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); } @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); } } @FunctionalInterface interface NodeAgentWithSchedulerFactory { NodeAgentWithScheduler create(NodeAgentContext context); } private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) { NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context); NodeAgent nodeAgent = nodeAgentFactory.create(contextManager); return new NodeAgentWithScheduler(nodeAgent, contextManager); } }
class NodeAdminImpl implements NodeAdmin { private static final Duration NODE_AGENT_FREEZE_TIMEOUT = Duration.ofSeconds(5); private static final Duration NODE_AGENT_SPREAD = Duration.ofSeconds(3); private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory; private final Clock clock; private final Duration freezeTimeout; private final Duration spread; private boolean previousWantFrozen; private boolean isFrozen; private Instant startOfFreezeConvergence; private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>(); private final GaugeWrapper numberOfContainersInLoadImageState; private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent; public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, MetricReceiverWrapper metricReceiver, Clock clock) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), metricReceiver, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD); } public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), metricReceiver, clock, freezeTimeout, spread); } NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory, MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory; this.clock = clock; this.freezeTimeout = freezeTimeout; this.spread = spread; this.previousWantFrozen = true; this.isFrozen = true; this.startOfFreezeConvergence = clock.instant(); Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading"); 
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions"); } @Override @Override public void updateNodeAgentMetrics() { int numberContainersWaitingImage = 0; int numberOfNewUnhandledExceptions = 0; for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) { if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++; numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions(); nodeAgentWithScheduler.updateContainerNodeMetrics(); } numberOfContainersInLoadImageState.sample(numberContainersWaitingImage); numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions); } @Override public boolean setFrozen(boolean wantFrozen) { if (wantFrozen != previousWantFrozen) { if (wantFrozen) { this.startOfFreezeConvergence = clock.instant(); } else { this.startOfFreezeConvergence = null; } previousWantFrozen = wantFrozen; } boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream() .filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout)) .count() == 0; if (wantFrozen) { if (allNodeAgentsConverged) isFrozen = true; } else isFrozen = false; return allNodeAgentsConverged; } @Override public boolean isFrozen() { return isFrozen; } @Override public Duration subsystemFreezeDuration() { if (startOfFreezeConvergence == null) { return Duration.ofSeconds(0); } else { return Duration.between(startOfFreezeConvergence, clock.instant()); } } @Override public void stopNodeAgentServices(List<String> hostnames) { hostnames.parallelStream() .filter(nodeAgentWithSchedulerByHostname::containsKey) .map(nodeAgentWithSchedulerByHostname::get) .forEach(nodeAgent -> { nodeAgent.suspend(); nodeAgent.stopServices(); }); } @Override public void start() { } @Override public void stop() { 
nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgent::stop); } private static <T> Set<T> diff(final Set<T> minuend, final Set<T> subtrahend) { final HashSet<T> result = new HashSet<>(minuend); result.removeAll(subtrahend); return result; } static class NodeAgentWithScheduler implements NodeAgent, NodeAgentScheduler { private final NodeAgent nodeAgent; private final NodeAgentScheduler nodeAgentScheduler; private NodeAgentWithScheduler(NodeAgent nodeAgent, NodeAgentScheduler nodeAgentScheduler) { this.nodeAgent = nodeAgent; this.nodeAgentScheduler = nodeAgentScheduler; } @Override public void stopServices() { nodeAgent.stopServices(); } @Override public void suspend() { nodeAgent.suspend(); } @Override public void start() { nodeAgent.start(); } @Override public void stop() { nodeAgent.stop(); } @Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); } @Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); } @Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); } @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); } @Override public boolean setFrozen(boolean frozen, Duration timeout) { return nodeAgentScheduler.setFrozen(frozen, timeout); } } @FunctionalInterface interface NodeAgentWithSchedulerFactory { NodeAgentWithScheduler create(NodeAgentContext context); } private static NodeAgentWithScheduler create(Clock clock, NodeAgentFactory nodeAgentFactory, NodeAgentContext context) { NodeAgentContextManager contextManager = new NodeAgentContextManager(clock, context); NodeAgent nodeAgent = nodeAgentFactory.create(contextManager); return new NodeAgentWithScheduler(nodeAgent, contextManager); } }
Here's a code review of the provided code patch: 1. Correctness: The code appears to handle different types and convert them appropriately based on the provided `Type desc`. However, it's not clear what `type` represents in this context, as it is not defined in the code snippet. Ensure that `type` is properly initialized and used correctly. 2. Naming: Variable names such as `desc`, `childString`, and `dateStr` are not descriptive enough. Consider using more meaningful names that accurately reflect the purpose of the variables. 3. Code style: Overall, the code formatting seems consistent and readable. However, there are some inconsistencies in indentation. Make sure all the lines within the same block are indented consistently. 4. Best practices: It would be beneficial to use constants or enums instead of relying on string comparisons like `Type.DATE.equals(type)`. By using constants/enums, you can avoid potential typos or inconsistencies in the strings. 5. Performance: The code repeatedly checks the type and calls conversions based on the type. Depending on the execution context, it may be more efficient to perform these type checks once outside the if-else ladder and then perform the appropriate conversion. This would avoid redundant type checks. 6. Compatibility: The code should be reviewed for compatibility with different versions of Java. Ensure that the code does not rely on language features or libraries that might not be available in older versions. 7. Simplicity: The code could benefit from refactoring to improve readability and reduce duplication. Extracting common code blocks into helper methods would make the code more concise and maintainable. 8. Comments: The code could use more inline comments to explain the rationale behind certain steps, especially at points where complex operations or conversion functions are being called. These comments would help other developers understand the code more easily. 9. 
Optimization points: Without knowing the context or performance requirements, it's difficult to pinpoint specific optimization points. However, general performance improvements can be achieved by minimizing unnecessary string trimming operations or avoiding repeated conversions. 10. Bug risks and improvement suggestions: The provided code snippet looks fine based on the limited context. However, without a complete understanding of the surrounding code and its requirements, it's challenging to identify potential bug risks or suggest improvements. It's recommended to provide more information about the context and surrounding code in order to conduct a more thorough review and offer specific suggestions.
/**
 * Casts this constant to {@code desc}, returning a new constant of the target type.
 * Source and target TIME are rejected outright; boolean sources cast through their
 * numeric form ("1"/"0"); DATE/DATETIME sources are re-encoded to a numeric string
 * before numeric casts; decimal-v3 overflow yields a NULL constant instead of failing.
 *
 * @param desc the target type
 * @return a new constant of type {@code desc}
 * @throws Exception if the cast is unsupported or the literal cannot be parsed
 */
public ConstantOperator castTo(Type desc) throws Exception {
    if (type.isTime() || desc.isTime()) {
        throw UnsupportedException
                .unsupportedException(this + " cast to " + desc.getPrimitiveType().toString());
    }
    String childString = toString();
    if (getType().isBoolean()) {
        // Booleans cast through "1"/"0", not "true"/"false".
        childString = getBoolean() ? "1" : "0";
    }
    if (desc.isBoolean()) {
        if ("FALSE".equalsIgnoreCase(childString) || "0".equalsIgnoreCase(childString)) {
            return ConstantOperator.createBoolean(false);
        } else if ("TRUE".equalsIgnoreCase(childString) || "1".equalsIgnoreCase(childString)) {
            return ConstantOperator.createBoolean(true);
        }
        // Any other literal falls through to the final "unsupported" throw below.
    } else if (desc.isTinyint()) {
        return ConstantOperator.createTinyInt(Byte.parseByte(childString.trim()));
    } else if (desc.isSmallint()) {
        return ConstantOperator.createSmallInt(Short.parseShort(childString.trim()));
    } else if (desc.isInt()) {
        // Only DATE (not DATETIME) is re-encoded for the INT target, as in the
        // original branch layout.
        return ConstantOperator.createInt(Integer.parseInt(toNumericString(childString, false).trim()));
    } else if (desc.isBigint()) {
        return ConstantOperator.createBigint(Long.parseLong(toNumericString(childString, true).trim()));
    } else if (desc.isLargeint()) {
        return ConstantOperator.createLargeInt(new BigInteger(toNumericString(childString, true).trim()));
    } else if (desc.isFloat()) {
        return ConstantOperator.createFloat(Double.parseDouble(toNumericString(childString, true)));
    } else if (desc.isDouble()) {
        return ConstantOperator.createDouble(Double.parseDouble(toNumericString(childString, true)));
    } else if (desc.isDate() || desc.isDatetime()) {
        String dateStr = StringUtils.strip(childString, "\r\n\t ");
        LocalDateTime dateTime = DateUtils.parseStrictDateTime(dateStr);
        if (Type.DATE.equals(desc)) {
            // DATE keeps only the calendar day; drop the time-of-day part.
            dateTime = dateTime.truncatedTo(ChronoUnit.DAYS);
        }
        return ConstantOperator.createDatetime(dateTime, desc);
    } else if (desc.isDecimalV2()) {
        return ConstantOperator.createDecimal(BigDecimal.valueOf(Double.parseDouble(childString)), Type.DECIMALV2);
    } else if (desc.isDecimalV3()) {
        BigDecimal decimal = new BigDecimal(childString);
        ScalarType scalarType = (ScalarType) desc;
        try {
            DecimalLiteral.checkLiteralOverflowInBinaryStyle(decimal, scalarType);
        } catch (AnalysisException ignored) {
            // An overflowing literal becomes NULL rather than failing the cast.
            return ConstantOperator.createNull(desc);
        }
        int realScale = DecimalLiteral.getRealScale(decimal);
        int scale = scalarType.getScalarScale();
        if (scale <= realScale) {
            decimal = decimal.setScale(scale, RoundingMode.HALF_UP);
        }
        if (scalarType.getScalarScale() == 0 && scalarType.getScalarPrecision() == 0) {
            throw new SemanticException("Forbidden cast to decimal(precision=0, scale=0)");
        }
        return ConstantOperator.createDecimal(decimal, desc);
    } else if (desc.isChar() || desc.isVarchar()) {
        return ConstantOperator.createChar(childString, desc);
    }
    throw UnsupportedException.unsupportedException(this + " cast to " + desc.getPrimitiveType().toString());
}

/**
 * Re-encodes a DATE (and optionally DATETIME) source literal into its numeric string
 * form before a numeric cast; any other source type is returned unchanged. Extracted
 * to remove five identical DATE/DATETIME conversion blocks from {@link #castTo}.
 *
 * @param value         the literal's string form
 * @param allowDatetime whether a DATETIME source should also be converted (targets
 *                      wide enough to hold the seconds encoding)
 */
private String toNumericString(String value, boolean allowDatetime) {
    if (Type.DATE.equals(type)) {
        return DateUtils.convertDateFormaterToDateKeyFormater(value);
    }
    if (allowDatetime && Type.DATETIME.equals(type)) {
        return DateUtils.convertDateTimeFormaterToSecondFormater(value);
    }
    return value;
}
String dateStr = StringUtils.strip(childString, "\r\n\t ");
/**
 * Casts this constant to {@code desc}, returning a new constant of the target type.
 * TIME is rejected as both source and target; boolean sources cast through their
 * numeric form ("1"/"0"); DATE/DATETIME sources are re-encoded to numeric strings
 * before numeric casts; a decimal-v3 overflow yields NULL instead of an error.
 *
 * @param desc the target type
 * @return a new constant of type {@code desc}
 * @throws Exception if the cast is unsupported or the literal cannot be parsed
 */
public ConstantOperator castTo(Type desc) throws Exception {
    if (type.isTime() || desc.isTime()) {
        throw UnsupportedException
                .unsupportedException(this + " cast to " + desc.getPrimitiveType().toString());
    }
    String childString = toString();
    if (getType().isBoolean()) {
        // Booleans cast through "1"/"0", not "true"/"false".
        childString = getBoolean() ? "1" : "0";
    }
    if (desc.isBoolean()) {
        if ("FALSE".equalsIgnoreCase(childString) || "0".equalsIgnoreCase(childString)) {
            return ConstantOperator.createBoolean(false);
        } else if ("TRUE".equalsIgnoreCase(childString) || "1".equalsIgnoreCase(childString)) {
            return ConstantOperator.createBoolean(true);
        }
        // Any other literal falls through to the final "unsupported" throw below.
    } else if (desc.isTinyint()) {
        return ConstantOperator.createTinyInt(Byte.parseByte(childString.trim()));
    } else if (desc.isSmallint()) {
        return ConstantOperator.createSmallInt(Short.parseShort(childString.trim()));
    } else if (desc.isInt()) {
        // Note: only DATE (not DATETIME) is re-encoded for the INT target.
        if (Type.DATE.equals(type)) {
            childString = DateUtils.convertDateFormaterToDateKeyFormater(childString);
        }
        return ConstantOperator.createInt(Integer.parseInt(childString.trim()));
    } else if (desc.isBigint()) {
        // DATE/DATETIME sources are re-encoded via DateUtils before the numeric parse.
        if (Type.DATE.equals(type)) {
            childString = DateUtils.convertDateFormaterToDateKeyFormater(childString);
        } else if (Type.DATETIME.equals(type)) {
            childString = DateUtils.convertDateTimeFormaterToSecondFormater(childString);
        }
        return ConstantOperator.createBigint(Long.parseLong(childString.trim()));
    } else if (desc.isLargeint()) {
        if (Type.DATE.equals(type)) {
            childString = DateUtils.convertDateFormaterToDateKeyFormater(childString);
        } else if (Type.DATETIME.equals(type)) {
            childString = DateUtils.convertDateTimeFormaterToSecondFormater(childString);
        }
        return ConstantOperator.createLargeInt(new BigInteger(childString.trim()));
    } else if (desc.isFloat()) {
        if (Type.DATE.equals(type)) {
            childString = DateUtils.convertDateFormaterToDateKeyFormater(childString);
        } else if (Type.DATETIME.equals(type)) {
            childString = DateUtils.convertDateTimeFormaterToSecondFormater(childString);
        }
        return ConstantOperator.createFloat(Double.parseDouble(childString));
    } else if (desc.isDouble()) {
        if (Type.DATE.equals(type)) {
            childString = DateUtils.convertDateFormaterToDateKeyFormater(childString);
        } else if (Type.DATETIME.equals(type)) {
            childString = DateUtils.convertDateTimeFormaterToSecondFormater(childString);
        }
        return ConstantOperator.createDouble(Double.parseDouble(childString));
    } else if (desc.isDate() || desc.isDatetime()) {
        String dateStr = StringUtils.strip(childString, "\r\n\t ");
        LocalDateTime dateTime = DateUtils.parseStrictDateTime(dateStr);
        if (Type.DATE.equals(desc)) {
            // DATE keeps only the calendar day; drop the time-of-day part.
            dateTime = dateTime.truncatedTo(ChronoUnit.DAYS);
        }
        return ConstantOperator.createDatetime(dateTime, desc);
    } else if (desc.isDecimalV2()) {
        return ConstantOperator.createDecimal(BigDecimal.valueOf(Double.parseDouble(childString)), Type.DECIMALV2);
    } else if (desc.isDecimalV3()) {
        BigDecimal decimal = new BigDecimal(childString);
        ScalarType scalarType = (ScalarType) desc;
        try {
            DecimalLiteral.checkLiteralOverflowInBinaryStyle(decimal, scalarType);
        } catch (AnalysisException ignored) {
            // An overflowing literal becomes NULL rather than failing the cast.
            return ConstantOperator.createNull(desc);
        }
        int realScale = DecimalLiteral.getRealScale(decimal);
        int scale = scalarType.getScalarScale();
        if (scale <= realScale) {
            decimal = decimal.setScale(scale, RoundingMode.HALF_UP);
        }
        if (scalarType.getScalarScale() == 0 && scalarType.getScalarPrecision() == 0) {
            throw new SemanticException("Forbidden cast to decimal(precision=0, scale=0)");
        }
        return ConstantOperator.createDecimal(decimal, desc);
    } else if (desc.isChar() || desc.isVarchar()) {
        return ConstantOperator.createChar(childString, desc);
    }
    throw UnsupportedException.unsupportedException(this + " cast to " + desc.getPrimitiveType().toString());
}
class ConstantOperator extends ScalarOperator implements Comparable<ConstantOperator> { private static final LocalDateTime MAX_DATETIME = LocalDateTime.of(9999, 12, 31, 23, 59, 59); private static final LocalDateTime MIN_DATETIME = LocalDateTime.of(0, 1, 1, 0, 0, 0); public static final ConstantOperator NULL = ConstantOperator.createNull(Type.BOOLEAN); public static final ConstantOperator TRUE = ConstantOperator.createBoolean(true); public static final ConstantOperator FALSE = ConstantOperator.createBoolean(false); private static final BigInteger MAX_LARGE_INT = new BigInteger("2").pow(127).subtract(BigInteger.ONE); private static final BigInteger MIN_LARGE_INT = new BigInteger("2").pow(128).multiply(BigInteger.valueOf(-1)); private static void requiredValid(LocalDateTime dateTime) throws SemanticException { if (null == dateTime || dateTime.isBefore(MIN_DATETIME) || dateTime.isAfter(MAX_DATETIME)) { throw new SemanticException("Invalid date value: " + (dateTime == null ? "NULL" : dateTime.toString())); } } private static void requiredValid(double value) throws SemanticException { if (Double.isNaN(value) || Double.isInfinite(value)) { throw new SemanticException("Invalid float/double value: " + value); } } private final Object value; private final boolean isNull; private ConstantOperator(Type type) { super(OperatorType.CONSTANT, type); this.value = null; this.isNull = true; } public ConstantOperator(Object value, Type type) { super(OperatorType.CONSTANT, type); Objects.requireNonNull(value, "constant value is null"); this.value = value; this.isNull = false; } public static ConstantOperator createObject(Object value, Type type) { return new ConstantOperator(value, type); } public static ConstantOperator createNull(Type type) { return new ConstantOperator(type); } public static ConstantOperator createBoolean(boolean value) { return new ConstantOperator(value, Type.BOOLEAN); } public static ConstantOperator createTinyInt(byte value) { return new ConstantOperator(value, 
TINYINT); } public static ConstantOperator createSmallInt(short value) { return new ConstantOperator(value, Type.SMALLINT); } public static ConstantOperator createInt(int value) { return new ConstantOperator(value, Type.INT); } public static ConstantOperator createBigint(long value) { return new ConstantOperator(value, Type.BIGINT); } public static ConstantOperator createLargeInt(BigInteger value) { return new ConstantOperator(value, Type.LARGEINT); } public static ConstantOperator createFloat(double value) throws SemanticException { requiredValid(value); return new ConstantOperator(value, Type.FLOAT); } public static ConstantOperator createDouble(double value) throws SemanticException { requiredValid(value); return new ConstantOperator(value, Type.DOUBLE); } public static ConstantOperator createDate(LocalDateTime value) throws SemanticException { requiredValid(value); return new ConstantOperator(value, Type.DATE); } public static ConstantOperator createDatetime(LocalDateTime value) throws SemanticException { requiredValid(value); return new ConstantOperator(value, Type.DATETIME); } public static ConstantOperator createDatetime(LocalDateTime value, Type dateType) { return new ConstantOperator(value, dateType); } public static ConstantOperator createTime(double value) { return new ConstantOperator(value, Type.TIME); } public static ConstantOperator createDecimal(BigDecimal value, Type type) { return new ConstantOperator(value, type); } public static ConstantOperator createVarchar(String value) { return new ConstantOperator(value, Type.VARCHAR); } public static ConstantOperator createChar(String value) { return new ConstantOperator(value, Type.CHAR); } public static ConstantOperator createChar(String value, Type charType) { return new ConstantOperator(value, charType); } public static ConstantOperator createBinary(byte[] value, Type binaryType) { return new ConstantOperator(value, binaryType); } public static ConstantOperator createExampleValueByType(Type type) { if 
(type.isTinyint()) { return createTinyInt((byte) 1); } else if (type.isSmallint()) { return createSmallInt((short) 1); } else if (type.isInt()) { return createInt(1); } else if (type.isBigint()) { return createBigint(1L); } else if (type.isLargeint()) { return createLargeInt(new BigInteger("1")); } else if (type.isDate()) { return createDate(LocalDateTime.of(2000, 1, 1, 00, 00, 00)); } else if (type.isDatetime()) { return createDatetime(LocalDateTime.of(2000, 1, 1, 00, 00, 00)); } else { throw new IllegalArgumentException("unsupported type: " + type); } } public boolean isNull() { return isNull; } public boolean isZero() { boolean isZero; if (isNull || value == null) { return false; } if (type.isInt()) { Integer val = (Integer) value; isZero = (val.compareTo(0) == 0); } else if (type.isBigint()) { Long val = (Long) value; isZero = (val.compareTo(0L) == 0); } else if (type.isLargeint()) { BigInteger val = (BigInteger) value; isZero = (val.compareTo(BigInteger.ZERO) == 0); } else if (type.isFloat()) { Float val = (Float) value; isZero = (val.compareTo(0.0f) == 0); } else if (type.isDouble()) { Double val = (Double) value; isZero = (val.compareTo(0.0) == 0); } else if (type.isDecimalV3()) { BigDecimal val = (BigDecimal) value; isZero = (val.compareTo(BigDecimal.ZERO) == 0); } else { isZero = false; } return isZero; } @Override public boolean isConstant() { return true; } @Override public boolean isVariable() { return false; } public Object getValue() { return value; } @Override public List<ScalarOperator> getChildren() { return emptyList(); } @Override public ScalarOperator getChild(int index) { return null; } @Override public void setChild(int index, ScalarOperator child) { } public ColumnRefSet getUsedColumns() { return new ColumnRefSet(); } public boolean getBoolean() { return (boolean) Optional.ofNullable(value).orElse(false); } public byte getTinyInt() { return (byte) Optional.ofNullable(value).orElse((byte) 0); } public short getSmallint() { return (short) 
Optional.ofNullable(value).orElse((short) 0); } public int getInt() { return (int) Optional.ofNullable(value).orElse(0); } public long getBigint() { return (long) Optional.ofNullable(value).orElse((long) 0); } public BigInteger getLargeInt() { return (BigInteger) Optional.ofNullable(value).orElse(new BigInteger("0")); } public double getDouble() { return (double) Optional.ofNullable(value).orElse((double) 0); } public double getFloat() { return (double) Optional.ofNullable(value).orElse((double) 0); } public LocalDateTime getDate() { return (LocalDateTime) Optional.ofNullable(value).orElse(LocalDateTime.MIN); } public LocalDateTime getDatetime() { return (LocalDateTime) Optional.ofNullable(value).orElse(LocalDateTime.MIN); } public double getTime() { return (double) Optional.ofNullable(value).orElse(0); } public BigDecimal getDecimal() { return (BigDecimal) Optional.ofNullable(value).orElse(new BigDecimal(0)); } public String getVarchar() { return (String) Optional.ofNullable(value).orElse(""); } public String getChar() { return (String) Optional.ofNullable(value).orElse(""); } public byte[] getBinary() { return (byte[]) (value); } @Override public String toString() { if (isNull()) { return "null"; } else if (type.isDatetime()) { LocalDateTime time = (LocalDateTime) Optional.ofNullable(value).orElse(LocalDateTime.MIN); if (time.getNano() != 0) { return time.format(DateUtils.DATE_TIME_MS_FORMATTER_UNIX); } return time.format(DateUtils.DATE_TIME_FORMATTER_UNIX); } else if (type.isDate()) { LocalDateTime time = (LocalDateTime) Optional.ofNullable(value).orElse(LocalDateTime.MIN); return time.format(DateUtils.DATE_FORMATTER_UNIX); } else if (type.isFloatingPointType()) { double val = (double) Optional.ofNullable(value).orElse((double) 0); BigDecimal decimal = BigDecimal.valueOf(val); return decimal.stripTrailingZeros().toPlainString(); } else if (type.isDecimalV2()) { return ((BigDecimal) value).stripTrailingZeros().toPlainString(); } else if 
(type.isDecimalOfAnyVersion()) { int scale = ((ScalarType) type).getScalarScale(); BigDecimal val = (BigDecimal) value; DecimalFormat df = new DecimalFormat((scale == 0 ? "0" : "0.") + StringUtils.repeat("0", scale)); return df.format(val); } return String.valueOf(value); } @Override public int hashCode() { return Objects.hash(value, type.getPrimitiveType(), isNull); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } ConstantOperator that = (ConstantOperator) obj; return isNull == that.isNull && Objects.equals(value, that.value) && type.matchesType(that.getType()); } @Override public <R, C> R accept(ScalarOperatorVisitor<R, C> visitor, C context) { return visitor.visitConstant(this, context); } @Override public int compareTo(ConstantOperator o) { if (isNull() && o.isNull()) { return 0; } else if (isNull() || o.isNull()) { return isNull() ? -1 : 1; } PrimitiveType t = type.getPrimitiveType(); if (t != o.getType().getPrimitiveType() && (!t.isCharFamily() && !o.getType().getPrimitiveType().isCharFamily()) && (!t.isDecimalOfAnyVersion() && !o.getType().getPrimitiveType().isDecimalOfAnyVersion())) { throw new StarRocksPlannerException("Constant " + this + " can't compare with Constant " + o, ErrorType.INTERNAL_ERROR); } if (t == PrimitiveType.BOOLEAN) { return Boolean.compare(getBoolean(), o.getBoolean()); } else if (t == PrimitiveType.TINYINT) { return Byte.compare(getTinyInt(), o.getTinyInt()); } else if (t == PrimitiveType.SMALLINT) { return Short.compare(getSmallint(), o.getSmallint()); } else if (t == PrimitiveType.INT) { return Integer.compare(getInt(), o.getInt()); } else if (t == PrimitiveType.BIGINT) { return Long.compare(getBigint(), o.getBigint()); } else if (t == PrimitiveType.LARGEINT) { return getLargeInt().compareTo(o.getLargeInt()); } else if (t == PrimitiveType.FLOAT || t == PrimitiveType.TIME) { return Double.compare(getDouble(), o.getDouble()); } else if 
(t == PrimitiveType.DOUBLE) { return Double.compare(getDouble(), o.getDouble()); } else if (t == PrimitiveType.DATE || t == PrimitiveType.DATETIME) { return getDatetime().compareTo(o.getDatetime()); } else if (t.isDecimalOfAnyVersion()) { return getDecimal().compareTo(o.getDecimal()); } else if (t == PrimitiveType.CHAR || t == PrimitiveType.VARCHAR) { return getVarchar().compareTo(o.getVarchar()); } return -1; } @Override public boolean isNullable() { return type.equals(Type.NULL) || isNull; } public ConstantOperator castToStrictly(Type type) throws Exception { if (!type.isDecimalV3()) { return castTo(type); } BigDecimal decimal = new BigDecimal(value.toString()); ScalarType scalarType = (ScalarType) type; try { DecimalLiteral.checkLiteralOverflowInDecimalStyle(decimal, scalarType); } catch (AnalysisException ignored) { return ConstantOperator.createNull(type); } int realScale = DecimalLiteral.getRealScale(decimal); int scale = scalarType.getScalarScale(); if (scale <= realScale) { decimal = decimal.setScale(scale, RoundingMode.HALF_UP); } if (scalarType.getScalarScale() == 0 && scalarType.getScalarPrecision() == 0) { throw new SemanticException("Forbidden cast to decimal(precision=0, scale=0)"); } return ConstantOperator.createDecimal(decimal, type); } public Optional<ConstantOperator> successor() { return computeValue(1); } public Optional<ConstantOperator> predecessor() { return computeValue(-1); } private Optional<ConstantOperator> computeValue(int delta) { return computeWithLimits(delta, v -> (byte) (v + delta), v -> (short) (v + delta), v -> v + delta, v -> (long) v + delta, v -> v.add(BigInteger.valueOf(delta)), date -> date.plus(delta, ChronoUnit.DAYS), date -> date.plus(delta, ChronoUnit.SECONDS) ); } private Optional<ConstantOperator> computeWithLimits(int delta, Function<Byte, Byte> byteFunc, Function<Short, Short> smallFunc, Function<Integer, Integer> intFunc, Function<Long, Long> longFunc, Function<BigInteger, BigInteger> bigintFunc, 
Function<LocalDateTime, LocalDateTime> dateFunc, Function<LocalDateTime, LocalDateTime> datetimeFunc) { if (type.isTinyint()) { return compute(delta, getTinyInt(), Byte.MAX_VALUE, Byte.MIN_VALUE, byteFunc, ConstantOperator::createTinyInt); } else if (type.isSmallint()) { return compute(delta, getSmallint(), Short.MAX_VALUE, Short.MIN_VALUE, smallFunc, ConstantOperator::createSmallInt); } else if (type.isInt()) { return compute(delta, getInt(), Integer.MAX_VALUE, Integer.MIN_VALUE, intFunc, ConstantOperator::createInt); } else if (type.isBigint()) { return compute(delta, getBigint(), Long.MAX_VALUE, Long.MIN_VALUE, longFunc, ConstantOperator::createBigint); } else if (type.isLargeint()) { return compute(delta, getLargeInt(), MAX_LARGE_INT, MIN_LARGE_INT, bigintFunc, ConstantOperator::createLargeInt); } else if (type.isDatetime()) { return compute(delta, (LocalDateTime) value, LocalDateTime.MAX, LocalDateTime.MIN, datetimeFunc, ConstantOperator::createDatetime); } else if (type.isDateType()) { return compute(delta, (LocalDateTime) value, LocalDate.MAX.atStartOfDay(), LocalDate.MIN.atStartOfDay(), dateFunc, ConstantOperator::createDate); } else { return Optional.empty(); } } private <T> Optional<ConstantOperator> compute(int delta, T value, T maxValue, T minValue, Function<T, T> func, Function<T, ConstantOperator> creator) { if ((delta > 0 && value.equals(maxValue)) || (delta < 0 && value.equals(minValue))) { return Optional.empty(); } else { return Optional.of(creator.apply(func.apply(value))); } } public long distance(ConstantOperator other) { if (type.isTinyint()) { return other.getTinyInt() - getTinyInt(); } else if (type.isSmallint()) { return other.getSmallint() - getSmallint(); } else if (type.isInt()) { return other.getInt() - getInt(); } else if (type.isBigint()) { return other.getBigint() - getBigint(); } else if (type.isLargeint()) { return other.getLargeInt().subtract(getLargeInt()).longValue(); } else if (type.isDatetime()) { return 
ChronoUnit.SECONDS.between(getDatetime(), other.getDatetime()); } else if (type.isDateType()) { return ChronoUnit.DAYS.between(getDatetime(), other.getDatetime()); } else { throw UnsupportedException.unsupportedException("unsupported distince for type:" + type); } } }
class ConstantOperator extends ScalarOperator implements Comparable<ConstantOperator> { private static final LocalDateTime MAX_DATETIME = LocalDateTime.of(9999, 12, 31, 23, 59, 59); private static final LocalDateTime MIN_DATETIME = LocalDateTime.of(0, 1, 1, 0, 0, 0); public static final ConstantOperator NULL = ConstantOperator.createNull(Type.BOOLEAN); public static final ConstantOperator TRUE = ConstantOperator.createBoolean(true); public static final ConstantOperator FALSE = ConstantOperator.createBoolean(false); private static final BigInteger MAX_LARGE_INT = new BigInteger("2").pow(127).subtract(BigInteger.ONE); private static final BigInteger MIN_LARGE_INT = new BigInteger("2").pow(128).multiply(BigInteger.valueOf(-1)); private static void requiredValid(LocalDateTime dateTime) throws SemanticException { if (null == dateTime || dateTime.isBefore(MIN_DATETIME) || dateTime.isAfter(MAX_DATETIME)) { throw new SemanticException("Invalid date value: " + (dateTime == null ? "NULL" : dateTime.toString())); } } private static void requiredValid(double value) throws SemanticException { if (Double.isNaN(value) || Double.isInfinite(value)) { throw new SemanticException("Invalid float/double value: " + value); } } private final Object value; private final boolean isNull; private ConstantOperator(Type type) { super(OperatorType.CONSTANT, type); this.value = null; this.isNull = true; } public ConstantOperator(Object value, Type type) { super(OperatorType.CONSTANT, type); Objects.requireNonNull(value, "constant value is null"); this.value = value; this.isNull = false; } public static ConstantOperator createObject(Object value, Type type) { return new ConstantOperator(value, type); } public static ConstantOperator createNull(Type type) { return new ConstantOperator(type); } public static ConstantOperator createBoolean(boolean value) { return new ConstantOperator(value, Type.BOOLEAN); } public static ConstantOperator createTinyInt(byte value) { return new ConstantOperator(value, 
TINYINT); } public static ConstantOperator createSmallInt(short value) { return new ConstantOperator(value, Type.SMALLINT); } public static ConstantOperator createInt(int value) { return new ConstantOperator(value, Type.INT); } public static ConstantOperator createBigint(long value) { return new ConstantOperator(value, Type.BIGINT); } public static ConstantOperator createLargeInt(BigInteger value) { return new ConstantOperator(value, Type.LARGEINT); } public static ConstantOperator createFloat(double value) throws SemanticException { requiredValid(value); return new ConstantOperator(value, Type.FLOAT); } public static ConstantOperator createDouble(double value) throws SemanticException { requiredValid(value); return new ConstantOperator(value, Type.DOUBLE); } public static ConstantOperator createDate(LocalDateTime value) throws SemanticException { requiredValid(value); return new ConstantOperator(value, Type.DATE); } public static ConstantOperator createDatetime(LocalDateTime value) throws SemanticException { requiredValid(value); return new ConstantOperator(value, Type.DATETIME); } public static ConstantOperator createDatetime(LocalDateTime value, Type dateType) { return new ConstantOperator(value, dateType); } public static ConstantOperator createTime(double value) { return new ConstantOperator(value, Type.TIME); } public static ConstantOperator createDecimal(BigDecimal value, Type type) { return new ConstantOperator(value, type); } public static ConstantOperator createVarchar(String value) { return new ConstantOperator(value, Type.VARCHAR); } public static ConstantOperator createChar(String value) { return new ConstantOperator(value, Type.CHAR); } public static ConstantOperator createChar(String value, Type charType) { return new ConstantOperator(value, charType); } public static ConstantOperator createBinary(byte[] value, Type binaryType) { return new ConstantOperator(value, binaryType); } public static ConstantOperator createExampleValueByType(Type type) { if 
(type.isTinyint()) { return createTinyInt((byte) 1); } else if (type.isSmallint()) { return createSmallInt((short) 1); } else if (type.isInt()) { return createInt(1); } else if (type.isBigint()) { return createBigint(1L); } else if (type.isLargeint()) { return createLargeInt(new BigInteger("1")); } else if (type.isDate()) { return createDate(LocalDateTime.of(2000, 1, 1, 00, 00, 00)); } else if (type.isDatetime()) { return createDatetime(LocalDateTime.of(2000, 1, 1, 00, 00, 00)); } else { throw new IllegalArgumentException("unsupported type: " + type); } } public boolean isNull() { return isNull; } public boolean isZero() { boolean isZero; if (isNull || value == null) { return false; } if (type.isInt()) { Integer val = (Integer) value; isZero = (val.compareTo(0) == 0); } else if (type.isBigint()) { Long val = (Long) value; isZero = (val.compareTo(0L) == 0); } else if (type.isLargeint()) { BigInteger val = (BigInteger) value; isZero = (val.compareTo(BigInteger.ZERO) == 0); } else if (type.isFloat()) { Float val = (Float) value; isZero = (val.compareTo(0.0f) == 0); } else if (type.isDouble()) { Double val = (Double) value; isZero = (val.compareTo(0.0) == 0); } else if (type.isDecimalV3()) { BigDecimal val = (BigDecimal) value; isZero = (val.compareTo(BigDecimal.ZERO) == 0); } else { isZero = false; } return isZero; } @Override public boolean isConstant() { return true; } @Override public boolean isVariable() { return false; } public Object getValue() { return value; } @Override public List<ScalarOperator> getChildren() { return emptyList(); } @Override public ScalarOperator getChild(int index) { return null; } @Override public void setChild(int index, ScalarOperator child) { } public ColumnRefSet getUsedColumns() { return new ColumnRefSet(); } public boolean getBoolean() { return (boolean) Optional.ofNullable(value).orElse(false); } public byte getTinyInt() { return (byte) Optional.ofNullable(value).orElse((byte) 0); } public short getSmallint() { return (short) 
Optional.ofNullable(value).orElse((short) 0); } public int getInt() { return (int) Optional.ofNullable(value).orElse(0); } public long getBigint() { return (long) Optional.ofNullable(value).orElse((long) 0); } public BigInteger getLargeInt() { return (BigInteger) Optional.ofNullable(value).orElse(new BigInteger("0")); } public double getDouble() { return (double) Optional.ofNullable(value).orElse((double) 0); } public double getFloat() { return (double) Optional.ofNullable(value).orElse((double) 0); } public LocalDateTime getDate() { return (LocalDateTime) Optional.ofNullable(value).orElse(LocalDateTime.MIN); } public LocalDateTime getDatetime() { return (LocalDateTime) Optional.ofNullable(value).orElse(LocalDateTime.MIN); } public double getTime() { return (double) Optional.ofNullable(value).orElse(0); } public BigDecimal getDecimal() { return (BigDecimal) Optional.ofNullable(value).orElse(new BigDecimal(0)); } public String getVarchar() { return (String) Optional.ofNullable(value).orElse(""); } public String getChar() { return (String) Optional.ofNullable(value).orElse(""); } public byte[] getBinary() { return (byte[]) (value); } @Override public String toString() { if (isNull()) { return "null"; } else if (type.isDatetime()) { LocalDateTime time = (LocalDateTime) Optional.ofNullable(value).orElse(LocalDateTime.MIN); if (time.getNano() != 0) { return time.format(DateUtils.DATE_TIME_MS_FORMATTER_UNIX); } return time.format(DateUtils.DATE_TIME_FORMATTER_UNIX); } else if (type.isDate()) { LocalDateTime time = (LocalDateTime) Optional.ofNullable(value).orElse(LocalDateTime.MIN); return time.format(DateUtils.DATE_FORMATTER_UNIX); } else if (type.isFloatingPointType()) { double val = (double) Optional.ofNullable(value).orElse((double) 0); BigDecimal decimal = BigDecimal.valueOf(val); return decimal.stripTrailingZeros().toPlainString(); } else if (type.isDecimalV2()) { return ((BigDecimal) value).stripTrailingZeros().toPlainString(); } else if 
(type.isDecimalOfAnyVersion()) { int scale = ((ScalarType) type).getScalarScale(); BigDecimal val = (BigDecimal) value; DecimalFormat df = new DecimalFormat((scale == 0 ? "0" : "0.") + StringUtils.repeat("0", scale)); return df.format(val); } return String.valueOf(value); } @Override public int hashCode() { return Objects.hash(value, type.getPrimitiveType(), isNull); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } ConstantOperator that = (ConstantOperator) obj; return isNull == that.isNull && Objects.equals(value, that.value) && type.matchesType(that.getType()); } @Override public <R, C> R accept(ScalarOperatorVisitor<R, C> visitor, C context) { return visitor.visitConstant(this, context); } @Override public int compareTo(ConstantOperator o) { if (isNull() && o.isNull()) { return 0; } else if (isNull() || o.isNull()) { return isNull() ? -1 : 1; } PrimitiveType t = type.getPrimitiveType(); if (t != o.getType().getPrimitiveType() && (!t.isCharFamily() && !o.getType().getPrimitiveType().isCharFamily()) && (!t.isDecimalOfAnyVersion() && !o.getType().getPrimitiveType().isDecimalOfAnyVersion())) { throw new StarRocksPlannerException("Constant " + this + " can't compare with Constant " + o, ErrorType.INTERNAL_ERROR); } if (t == PrimitiveType.BOOLEAN) { return Boolean.compare(getBoolean(), o.getBoolean()); } else if (t == PrimitiveType.TINYINT) { return Byte.compare(getTinyInt(), o.getTinyInt()); } else if (t == PrimitiveType.SMALLINT) { return Short.compare(getSmallint(), o.getSmallint()); } else if (t == PrimitiveType.INT) { return Integer.compare(getInt(), o.getInt()); } else if (t == PrimitiveType.BIGINT) { return Long.compare(getBigint(), o.getBigint()); } else if (t == PrimitiveType.LARGEINT) { return getLargeInt().compareTo(o.getLargeInt()); } else if (t == PrimitiveType.FLOAT || t == PrimitiveType.TIME) { return Double.compare(getDouble(), o.getDouble()); } else if 
(t == PrimitiveType.DOUBLE) { return Double.compare(getDouble(), o.getDouble()); } else if (t == PrimitiveType.DATE || t == PrimitiveType.DATETIME) { return getDatetime().compareTo(o.getDatetime()); } else if (t.isDecimalOfAnyVersion()) { return getDecimal().compareTo(o.getDecimal()); } else if (t == PrimitiveType.CHAR || t == PrimitiveType.VARCHAR) { return getVarchar().compareTo(o.getVarchar()); } return -1; } @Override public boolean isNullable() { return type.equals(Type.NULL) || isNull; } public ConstantOperator castToStrictly(Type type) throws Exception { if (!type.isDecimalV3()) { return castTo(type); } BigDecimal decimal = new BigDecimal(value.toString()); ScalarType scalarType = (ScalarType) type; try { DecimalLiteral.checkLiteralOverflowInDecimalStyle(decimal, scalarType); } catch (AnalysisException ignored) { return ConstantOperator.createNull(type); } int realScale = DecimalLiteral.getRealScale(decimal); int scale = scalarType.getScalarScale(); if (scale <= realScale) { decimal = decimal.setScale(scale, RoundingMode.HALF_UP); } if (scalarType.getScalarScale() == 0 && scalarType.getScalarPrecision() == 0) { throw new SemanticException("Forbidden cast to decimal(precision=0, scale=0)"); } return ConstantOperator.createDecimal(decimal, type); } public Optional<ConstantOperator> successor() { return computeValue(1); } public Optional<ConstantOperator> predecessor() { return computeValue(-1); } private Optional<ConstantOperator> computeValue(int delta) { return computeWithLimits(delta, v -> (byte) (v + delta), v -> (short) (v + delta), v -> v + delta, v -> (long) v + delta, v -> v.add(BigInteger.valueOf(delta)), date -> date.plus(delta, ChronoUnit.DAYS), date -> date.plus(delta, ChronoUnit.SECONDS) ); } private Optional<ConstantOperator> computeWithLimits(int delta, Function<Byte, Byte> byteFunc, Function<Short, Short> smallFunc, Function<Integer, Integer> intFunc, Function<Long, Long> longFunc, Function<BigInteger, BigInteger> bigintFunc, 
Function<LocalDateTime, LocalDateTime> dateFunc, Function<LocalDateTime, LocalDateTime> datetimeFunc) { if (type.isTinyint()) { return compute(delta, getTinyInt(), Byte.MAX_VALUE, Byte.MIN_VALUE, byteFunc, ConstantOperator::createTinyInt); } else if (type.isSmallint()) { return compute(delta, getSmallint(), Short.MAX_VALUE, Short.MIN_VALUE, smallFunc, ConstantOperator::createSmallInt); } else if (type.isInt()) { return compute(delta, getInt(), Integer.MAX_VALUE, Integer.MIN_VALUE, intFunc, ConstantOperator::createInt); } else if (type.isBigint()) { return compute(delta, getBigint(), Long.MAX_VALUE, Long.MIN_VALUE, longFunc, ConstantOperator::createBigint); } else if (type.isLargeint()) { return compute(delta, getLargeInt(), MAX_LARGE_INT, MIN_LARGE_INT, bigintFunc, ConstantOperator::createLargeInt); } else if (type.isDatetime()) { return compute(delta, (LocalDateTime) value, LocalDateTime.MAX, LocalDateTime.MIN, datetimeFunc, ConstantOperator::createDatetime); } else if (type.isDateType()) { return compute(delta, (LocalDateTime) value, LocalDate.MAX.atStartOfDay(), LocalDate.MIN.atStartOfDay(), dateFunc, ConstantOperator::createDate); } else { return Optional.empty(); } } private <T> Optional<ConstantOperator> compute(int delta, T value, T maxValue, T minValue, Function<T, T> func, Function<T, ConstantOperator> creator) { if ((delta > 0 && value.equals(maxValue)) || (delta < 0 && value.equals(minValue))) { return Optional.empty(); } else { return Optional.of(creator.apply(func.apply(value))); } } public long distance(ConstantOperator other) { if (type.isTinyint()) { return other.getTinyInt() - getTinyInt(); } else if (type.isSmallint()) { return other.getSmallint() - getSmallint(); } else if (type.isInt()) { return other.getInt() - getInt(); } else if (type.isBigint()) { return other.getBigint() - getBigint(); } else if (type.isLargeint()) { return other.getLargeInt().subtract(getLargeInt()).longValue(); } else if (type.isDatetime()) { return 
ChronoUnit.SECONDS.between(getDatetime(), other.getDatetime()); } else if (type.isDateType()) { return ChronoUnit.DAYS.between(getDatetime(), other.getDatetime()); } else { throw UnsupportedException.unsupportedException("unsupported distince for type:" + type); } } }
A one-minute lock wait seems unnecessarily long for this maintenance task; reduce it to 10 seconds so the maintainer gives up quickly and retries on the next run.
private void retireActive(String hostname, ApplicationId owner, List<String> reasons) { Duration lockWait = Duration.ofMinutes(1); try (Mutex lock = nodeRepository().lock(owner, lockWait)) { Optional<Node> node = nodeRepository().getNode(hostname); if (node.isEmpty()) return; if (node.get().state() != Node.State.active) return; if (!node.get().allocation().orElseThrow().owner().equals(owner)) return; if (node.get().status().wantToRetire()) return; log.info("Setting wantToRetire on " + node.get() + " due to these reports: " + reasons); nodeRepository().write(node.get().withWantToRetire(true, Agent.NodeFailer, clock.instant())); } catch (ApplicationLockException e) { log.warning("Failed to get lock on " + owner + " within " + lockWait + " to set wantToRetire, will retry later"); } }
Duration lockWait = Duration.ofMinutes(1);
private void retireActive(String hostname, ApplicationId owner, List<String> reasons) { Duration lockWait = Duration.ofSeconds(10); try (Mutex lock = nodeRepository().lock(owner, lockWait)) { Optional<Node> node = nodeRepository().getNode(hostname); if (node.isEmpty()) return; if (node.get().state() != Node.State.active) return; if (!node.get().allocation().orElseThrow().owner().equals(owner)) return; if (node.get().status().wantToRetire()) return; log.info("Setting wantToRetire on " + node.get() + " due to these reports: " + reasons); nodeRepository().write(node.get().withWantToRetire(true, Agent.NodeFailer, clock.instant())); } catch (ApplicationLockException e) { log.warning("Failed to get lock on " + owner + " within " + lockWait + " to set wantToRetire, will retry later"); } }
/**
 * Maintenance job that fails bad nodes and flags bad hosts for retirement:
 * ready nodes that stop requesting config or report hardware problems are failed outright;
 * active nodes that have been down past a grace period (and are not suspended) are failed
 * via redeployment; hosts with hardware failure reports are marked wantToRetire.
 * All failing is throttled to protect against runaway mass-failure.
 */
class NodeFailer extends Maintainer {

    private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
    // Expected interval between config requests from a healthy ready node.
    private static final Duration nodeRequestInterval = Duration.ofMinutes(10);

    /** Metric for number of nodes that we want to fail, but cannot due to throttling */
    public static final String throttledNodeFailuresMetric = "throttledNodeFailures";

    /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
    public static final String throttlingActiveMetric = "nodeFailThrottling";

    /** Provides information about the status of ready hosts */
    private final HostLivenessTracker hostLivenessTracker;

    /** Provides (more accurate) information about the status of active hosts */
    private final ServiceMonitor serviceMonitor;

    private final Deployer deployer;
    // Grace period a node may be down before it becomes a failure candidate.
    private final Duration downTimeLimit;
    private final Clock clock;
    private final Orchestrator orchestrator;
    // Used to avoid failing ready nodes for missing config requests right after a restart,
    // before liveness data has had time to accumulate (see getReadyNodesByFailureReason).
    private final Instant constructionTime;
    private final ThrottlePolicy throttlePolicy;
    private final Metric metric;
    private final ConfigserverConfig configserverConfig;

    public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
                      ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
                      Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
                      ThrottlePolicy throttlePolicy, Metric metric, JobControl jobControl,
                      ConfigserverConfig configserverConfig) {
        // Run at half the down-time limit (capped at 5 minutes) so down nodes are noticed in time.
        super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl);
        this.deployer = deployer;
        this.hostLivenessTracker = hostLivenessTracker;
        this.serviceMonitor = serviceMonitor;
        this.downTimeLimit = downTimeLimit;
        this.clock = clock;
        this.orchestrator = orchestrator;
        this.constructionTime = clock.instant();
        this.throttlePolicy = throttlePolicy;
        this.metric = metric;
        this.configserverConfig = configserverConfig;
    }

    @Override
    protected void maintain() {
        int throttledNodeFailures = 0;

        // 1. Fail bad ready nodes, while holding the allocation lock.
        try (Mutex lock = nodeRepository().lockAllocation()) {
            updateNodeLivenessEventsForReadyNodes();
            for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
                Node node = entry.getKey();
                if (throttle(node)) {
                    throttledNodeFailures++;
                    continue;
                }
                String reason = entry.getValue();
                nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason);
            }
        }

        // 2. Refresh down/up records from the service monitor, then fail bad active nodes.
        updateNodeDownState();
        List<Node> activeNodes = nodeRepository().getNodes(Node.State.active);
        Set<Node> nodesWithFailureReason = new HashSet<>();
        for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason(activeNodes).entrySet()) {
            Node node = entry.getKey();
            nodesWithFailureReason.add(node);
            if (!failAllowedFor(node.type())) {
                continue;
            }
            if (throttle(node)) {
                throttledNodeFailures++;
                continue;
            }
            String reason = entry.getValue();
            failActive(node, reason);
        }

        // 3. Mark at most one additional host per run as wantToRetire based on hardware reports,
        //    considering only top-level (parentless) hosts without an outright failure reason.
        final long maxWantToRetireHosts = 1;
        List<Node> candidateNodes = activeNodes.stream()
                .filter(node -> failAllowedFor(node.type()))
                .filter(node -> !nodesWithFailureReason.contains(node))
                .filter(node -> node.parentHostname().isEmpty())
                .collect(Collectors.toList());
        long currentWantToRetireHosts = candidateNodes.stream().filter(node -> node.status().wantToRetire()).count();
        for (int i = 0; i < candidateNodes.size() && currentWantToRetireHosts < maxWantToRetireHosts; ++i) {
            Node node = candidateNodes.get(i);
            List<String> reasons = reasonsToRetireActiveParentHost(node);
            if (reasons.size() > 0) {
                retireRecursively(node, reasons, activeNodes);
                ++currentWantToRetireHosts;
            }
        }

        // Report throttling state: active (1) if any failure was held back this run.
        metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null);
        metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
    }

    /** Records a "requested" history event for ready nodes whose last config request is newer than history shows. */
    private void updateNodeLivenessEventsForReadyNodes() {
        for (Node node : nodeRepository().getNodes(Node.State.ready)) {
            Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
            if ( ! lastLocalRequest.isPresent()) continue;
            if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) {
                History updatedHistory = node.history()
                        .with(new History.Event(History.Event.Type.requested, Agent.NodeFailer, lastLocalRequest.get()));
                nodeRepository().write(node.with(updatedHistory));
            }
        }
    }

    /**
     * Returns ready nodes that should be failed, mapped to the human-readable reason:
     * missing config requests, hardware failure/divergence, or failure reports on the
     * node itself or its parent host.
     */
    private Map<Node, String> getReadyNodesByFailureReason() {
        // Shortly after construction we have no liveness data yet, so accept any request time
        // (EPOCH threshold) instead of failing every ready node for silence.
        Instant oldestAcceptableRequestTime =
                constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ?
                        Instant.EPOCH : clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);
        Map<Node, String> nodesByFailureReason = new HashMap<>();
        for (Node node : nodeRepository().getNodes(Node.State.ready)) {
            if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
                nodesByFailureReason.put(node, "Not receiving config requests from node");
            } else if (node.status().hardwareFailureDescription().isPresent()) {
                nodesByFailureReason.put(node, "Node has hardware failure");
            } else if (node.status().hardwareDivergence().isPresent()) {
                nodesByFailureReason.put(node, "Node has hardware divergence");
            } else {
                // No direct problem: check failure reports on the node's host (or itself if parentless).
                Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().getNode(parent)).orElse(node);
                List<String> failureReports = reasonsToRetireActiveParentHost(hostNode);
                if (failureReports.size() > 0) {
                    if (hostNode.equals(node)) {
                        nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
                    } else {
                        nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
                    }
                }
            }
        }
        return nodesByFailureReason;
    }

    /**
     * For each active node known to the service monitor: records a "down" history event if
     * all its monitored services are down (see {@link #badNode}), otherwise clears any
     * existing "down" record.
     */
    private void updateNodeDownState() {
        Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream()
                .collect(Collectors.toMap(Node::hostname, node -> node));
        serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName()
                .forEach((hostName, serviceInstances) -> {
                    Node node = activeNodesByHostname.get(hostName.s());
                    if (node == null) return;
                    if (badNode(serviceInstances)) {
                        recordAsDown(node);
                    } else {
                        clearDownRecord(node);
                    }
                });
    }

    /**
     * Returns active nodes that should be failed, mapped to the reason: down past the
     * grace period without the application being suspended, or hardware failure on a
     * node that is suspended (safe to take down).
     */
    private Map<Node, String> getActiveNodesByFailureReason(List<Node> activeNodes) {
        Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
        Map<Node, String> nodesByFailureReason = new HashMap<>();
        for (Node node : activeNodes) {
            if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
                nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
            } else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) {
                nodesByFailureReason.put(node, "Node has hardware failure: " + node.status().hardwareFailureDescription().get());
            }
        }
        return nodesByFailureReason;
    }

    /** Collects the hardware-problem reports present on the host that justify retiring it. */
    private static List<String> reasonsToRetireActiveParentHost(Node hostNode) {
        return Stream.of(
                "badTotalMemorySize",
                "badTotalDiskSize",
                "badDiskType",
                "badInterfaceSpeed",
                "badCpuCount"
        )
                .map(reportId -> baseReportToString(hostNode, reportId))
                .flatMap(Optional::stream)
                .collect(Collectors.toList());
    }

    /** The generated string is built from the report's ID, created time, and description only. */
    static Optional<String> baseReportToString(Node node, String reportId) {
        return node.reports().getReport(reportId).map(report ->
                reportId + " reported " + report.getCreatedTime() + ": " + report.getDescription());
    }

    /**
     * There are reasons why this node should be parked, and we'd like to do it through retiring,
     * including any child nodes. Children are retired first; the node itself is skipped if it
     * is already marked wantToRetire.
     */
    private void retireRecursively(Node node, List<String> reasons, List<Node> activeNodesIfMaybeParent) {
        if (activeNodesIfMaybeParent != null) {
            List<Node> childNodesToRetire = activeNodesIfMaybeParent.stream()
                    .filter(n -> n.parentHostname().equals(Optional.of(node.hostname())))
                    .collect(Collectors.toList());
            for (Node childNode : childNodesToRetire) {
                // null: children cannot themselves have children, so no further recursion into the list.
                retireRecursively(childNode, reasons, null);
            }
        }
        if (node.status().wantToRetire()) return;
        retireActive(node.hostname(), node.allocation().get().owner(), reasons);
    }

    /** Returns whether node has any kind of hardware issue, on itself or on its parent host. */
    public static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) {
        if (node.status().hardwareFailureDescription().isPresent() || node.status().hardwareDivergence().isPresent()) {
            return true;
        }
        Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.getNode(parent)).orElse(node);
        return reasonsToRetireActiveParentHost(hostNode).size() > 0;
    }

    /** Docker hosts only send config requests when node-admin runs in a container. */
    private boolean expectConfigRequests(Node node) {
        return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer();
    }

    /** True if the node requested config after instant, or was made ready too recently to judge. */
    private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
        return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
    }

    private boolean wasMadeReadyBefore(Node node, Instant instant) {
        return node.history().hasEventBefore(History.Event.Type.readied, instant);
    }

    private boolean hasRecordedRequestAfter(Node node, Instant instant) {
        return node.history().hasEventAfter(History.Event.Type.requested, instant);
    }

    /** True if the node's owning application is suspended in the orchestrator (unknown apps count as not suspended). */
    private boolean applicationSuspended(Node node) {
        try {
            return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
                    == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
        } catch (ApplicationIdNotFoundException e) {
            // A node whose application is gone from the orchestrator is treated as not suspended.
            return false;
        }
    }

    /** True if the orchestrator allows this specific node to be down (unknown hosts count as not suspended). */
    private boolean nodeSuspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN;
        } catch (HostNameNotFoundException e) {
            return false;
        }
    }

    /**
     * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effective
     * unless the node is replaced.
     * However, nodes of other types are not replaced (because all of the type are used by a single application),
     * so we only allow one to be in failed at any point in time to protect against runaway failing.
     */
    private boolean failAllowedFor(NodeType nodeType) {
        if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
        return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
    }

    /**
     * Returns true if the node is considered bad: all monitored services services are down.
     * If a node remains bad for a long time, the NodeFailer will eventually try to fail the node.
     */
    public static boolean badNode(List<ServiceInstance> services) {
        Map<ServiceStatus, Long> countsByStatus = services.stream()
                .collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting()));
        // Bad = no service UP and at least one service definitively DOWN (not merely unknown).
        return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L &&
                countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L;
    }

    /**
     * Record a node as down if not already recorded and returns the node in the new state.
     * This assumes the node is found in the node
     * repo and that the node is allocated. If we get here otherwise something is truly odd.
     */
    private Node recordAsDown(Node node) {
        if (node.history().event(History.Event.Type.down).isPresent()) return node; // already recorded
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            // Re-read under the lock to avoid writing over a concurrent change.
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
            return nodeRepository().write(node.downAt(clock.instant()));
        }
    }

    /** Removes any "down" history record from the node, under the application lock. */
    private void clearDownRecord(Node node) {
        if ( ! node.history().event(History.Event.Type.down).isPresent()) return;
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
            nodeRepository().write(node.up());
        }
    }

    /**
     * Called when a node should be moved to the failed state: Do that if it seems safe,
     * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
     * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
     *
     * @return whether node was successfully failed
     */
    private boolean failActive(Node node, String reason) {
        Optional<Deployment> deployment =
                deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
        if ( ! deployment.isPresent()) return false; // this will be done at another config server
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            // For hosts: fail all active child (tenant) nodes first; abort if any of them cannot be failed.
            boolean allTenantNodesFailedOutSuccessfully = true;
            String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
            for (Node failingTenantNode : nodeRepository().list().childrenOf(node)) {
                if (failingTenantNode.state() == Node.State.active) {
                    allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
                } else {
                    nodeRepository().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
                }
            }
            if (! allTenantNodesFailedOutSuccessfully) return false;
            node = nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason);
            try {
                deployment.get().activate();
                return true;
            } catch (RuntimeException e) {
                // Redeployment without the node failed: roll the node back to active so capacity is not lost.
                nodeRepository().reactivate(node.hostname(), Agent.NodeFailer,
                        "Failed to redeploy after being failed by NodeFailer");
                log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
                        ", but redeploying without the node failed", e);
                return false;
            }
        }
    }

    /** Returns true if node failing should be throttled */
    private boolean throttle(Node node) {
        if (throttlePolicy == ThrottlePolicy.disabled) return false;
        Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
        List<Node> nodes = nodeRepository().getNodes();
        NodeList recentlyFailedNodes = nodes.stream()
                .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
                .collect(collectingAndThen(Collectors.toList(), NodeList::new));
        if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
        // Parentless (host-level) nodes get an extra allowance counted against recently failed parents.
        if (!node.parentHostname().isPresent() &&
                recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
        log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
                throttlePolicy.toHumanReadableString(nodes.size())));
        return true;
    }

    /** Limits how many nodes may be failed within a rolling time window. */
    public enum ThrottlePolicy {

        hosted(Duration.ofDays(1), 0.02, 2),
        disabled(Duration.ZERO, 0, 0);

        // Length of the rolling window over which recent failures are counted.
        private final Duration throttleWindow;
        // Fraction of the total node count allowed to fail within the window.
        private final double fractionAllowedToFail;
        // Absolute lower bound on the number of nodes allowed to fail within the window.
        private final int minimumAllowedToFail;

        ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
            this.throttleWindow = throttleWindow;
            this.fractionAllowedToFail = fractionAllowedToFail;
            this.minimumAllowedToFail = minimumAllowedToFail;
        }

        public int allowedToFailOf(int totalNodes) {
            return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
        }

        public String toHumanReadableString(int totalNodes) {
            return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s",
                    fractionAllowedToFail*100, allowedToFailOf(totalNodes), minimumAllowedToFail, throttleWindow);
        }
    }
}
class NodeFailer extends Maintainer { private static final Logger log = Logger.getLogger(NodeFailer.class.getName()); private static final Duration nodeRequestInterval = Duration.ofMinutes(10); /** Metric for number of nodes that we want to fail, but cannot due to throttling */ public static final String throttledNodeFailuresMetric = "throttledNodeFailures"; /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */ public static final String throttlingActiveMetric = "nodeFailThrottling"; /** Provides information about the status of ready hosts */ private final HostLivenessTracker hostLivenessTracker; /** Provides (more accurate) information about the status of active hosts */ private final ServiceMonitor serviceMonitor; private final Deployer deployer; private final Duration downTimeLimit; private final Clock clock; private final Orchestrator orchestrator; private final Instant constructionTime; private final ThrottlePolicy throttlePolicy; private final Metric metric; private final ConfigserverConfig configserverConfig; public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker, ServiceMonitor serviceMonitor, NodeRepository nodeRepository, Duration downTimeLimit, Clock clock, Orchestrator orchestrator, ThrottlePolicy throttlePolicy, Metric metric, JobControl jobControl, ConfigserverConfig configserverConfig) { super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl); this.deployer = deployer; this.hostLivenessTracker = hostLivenessTracker; this.serviceMonitor = serviceMonitor; this.downTimeLimit = downTimeLimit; this.clock = clock; this.orchestrator = orchestrator; this.constructionTime = clock.instant(); this.throttlePolicy = throttlePolicy; this.metric = metric; this.configserverConfig = configserverConfig; } @Override protected void maintain() { int throttledNodeFailures = 0; try (Mutex lock = nodeRepository().lockAllocation()) { updateNodeLivenessEventsForReadyNodes(); 
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) { Node node = entry.getKey(); if (throttle(node)) { throttledNodeFailures++; continue; } String reason = entry.getValue(); nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason); } } updateNodeDownState(); List<Node> activeNodes = nodeRepository().getNodes(Node.State.active); Set<Node> nodesWithFailureReason = new HashSet<>(); for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason(activeNodes).entrySet()) { Node node = entry.getKey(); nodesWithFailureReason.add(node); if (!failAllowedFor(node.type())) { continue; } if (throttle(node)) { throttledNodeFailures++; continue; } String reason = entry.getValue(); failActive(node, reason); } activeNodes.stream() .filter(node -> failAllowedFor(node.type())) .filter(node -> !nodesWithFailureReason.contains(node)) .filter(node -> node.parentHostname().isEmpty()) .sorted(Comparator.comparing(node -> node.status().wantToRetire(), Comparator.reverseOrder())) .filter(node -> { if (node.status().wantToRetire()) return true; if (node.allocation().map(a -> a.membership().retired()).orElse(false)) return true; List<String> reasons = reasonsToRetireActiveParentHost(node); if (reasons.size() > 0) { retireRecursively(node, reasons, activeNodes); return true; } return false; }) .limit(1) .count(); metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null); metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null); } private void updateNodeLivenessEventsForReadyNodes() { for (Node node : nodeRepository().getNodes(Node.State.ready)) { Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname()); if ( ! lastLocalRequest.isPresent()) continue; if (! 
node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) { History updatedHistory = node.history() .with(new History.Event(History.Event.Type.requested, Agent.NodeFailer, lastLocalRequest.get())); nodeRepository().write(node.with(updatedHistory)); } } } private Map<Node, String> getReadyNodesByFailureReason() { Instant oldestAcceptableRequestTime = constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ? Instant.EPOCH : clock.instant().minus(downTimeLimit).minus(nodeRequestInterval); Map<Node, String> nodesByFailureReason = new HashMap<>(); for (Node node : nodeRepository().getNodes(Node.State.ready)) { if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) { nodesByFailureReason.put(node, "Not receiving config requests from node"); } else if (node.status().hardwareFailureDescription().isPresent()) { nodesByFailureReason.put(node, "Node has hardware failure"); } else if (node.status().hardwareDivergence().isPresent()) { nodesByFailureReason.put(node, "Node has hardware divergence"); } else { Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().getNode(parent)).orElse(node); List<String> failureReports = reasonsToRetireActiveParentHost(hostNode); if (failureReports.size() > 0) { if (hostNode.equals(node)) { nodesByFailureReason.put(node, "Host has failure reports: " + failureReports); } else { nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports); } } } } return nodesByFailureReason; } /** * If the node is down (see {@link * Otherwise we remove any "down" history record. 
*/ private void updateNodeDownState() { Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream() .collect(Collectors.toMap(Node::hostname, node -> node)); serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName() .forEach((hostName, serviceInstances) -> { Node node = activeNodesByHostname.get(hostName.s()); if (node == null) return; if (badNode(serviceInstances)) { recordAsDown(node); } else { clearDownRecord(node); } }); } private Map<Node, String> getActiveNodesByFailureReason(List<Node> activeNodes) { Instant graceTimeEnd = clock.instant().minus(downTimeLimit); Map<Node, String> nodesByFailureReason = new HashMap<>(); for (Node node : activeNodes) { if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) { nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit); } else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) { nodesByFailureReason.put(node, "Node has hardware failure: " + node.status().hardwareFailureDescription().get()); } } return nodesByFailureReason; } private static List<String> reasonsToRetireActiveParentHost(Node hostNode) { return Stream.of( "badTotalMemorySize", "badTotalDiskSize", "badDiskType", "badInterfaceSpeed", "badCpuCount" ) .map(reportId -> baseReportToString(hostNode, reportId)) .flatMap(Optional::stream) .collect(Collectors.toList()); } /** The generated string is built from the report's ID, created time, and description only. */ static Optional<String> baseReportToString(Node node, String reportId) { return node.reports().getReport(reportId).map(report -> reportId + " reported " + report.getCreatedTime() + ": " + report.getDescription()); } /** * There are reasons why this node should be parked, and we'd like to do it through retiring, * including any child nodes. 
*/ private void retireRecursively(Node node, List<String> reasons, List<Node> activeNodes) { if (activeNodes != null) { List<Node> childNodesToRetire = activeNodes.stream() .filter(n -> n.parentHostname().equals(Optional.of(node.hostname()))) .collect(Collectors.toList()); for (Node childNode : childNodesToRetire) { retireRecursively(childNode, reasons, null); } } if (node.status().wantToRetire()) return; retireActive(node.hostname(), node.allocation().get().owner(), reasons); } /** Returns whether node has any kind of hardware issue */ public static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) { if (node.status().hardwareFailureDescription().isPresent() || node.status().hardwareDivergence().isPresent()) { return true; } Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.getNode(parent)).orElse(node); return reasonsToRetireActiveParentHost(hostNode).size() > 0; } private boolean expectConfigRequests(Node node) { return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer(); } private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) { return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant); } private boolean wasMadeReadyBefore(Node node, Instant instant) { return node.history().hasEventBefore(History.Event.Type.readied, instant); } private boolean hasRecordedRequestAfter(Node node, Instant instant) { return node.history().hasEventAfter(History.Event.Type.requested, instant); } private boolean applicationSuspended(Node node) { try { return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner()) == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (ApplicationIdNotFoundException e) { return false; } } private boolean nodeSuspended(Node node) { try { return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN; } catch (HostNameNotFoundException e) { return false; } } /** * We can attempt to fail any number 
of *tenant* and *host* nodes because the operation will not be effected * unless the node is replaced. * However, nodes of other types are not replaced (because all of the type are used by a single application), * so we only allow one to be in failed at any point in time to protect against runaway failing. */ private boolean failAllowedFor(NodeType nodeType) { if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true; return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0; } /** * Returns true if the node is considered bad: all monitored services services are down. * If a node remains bad for a long time, the NodeFailer will eventually try to fail the node. */ public static boolean badNode(List<ServiceInstance> services) { Map<ServiceStatus, Long> countsByStatus = services.stream() .collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting())); return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L && countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L; } /** * Record a node as down if not already recorded and returns the node in the new state. * This assumes the node is found in the node * repo and that the node is allocated. If we get here otherwise something is truly odd. */ private Node recordAsDown(Node node) { if (node.history().event(History.Event.Type.down).isPresent()) return node; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); return nodeRepository().write(node.downAt(clock.instant())); } } private void clearDownRecord(Node node) { if ( ! 
node.history().event(History.Event.Type.down).isPresent()) return; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); nodeRepository().write(node.up()); } } /** * Called when a node should be moved to the failed state: Do that if it seems safe, * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host). * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken. * * @return whether node was successfully failed */ private boolean failActive(Node node, String reason) { Optional<Deployment> deployment = deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30)); if ( ! deployment.isPresent()) return false; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { boolean allTenantNodesFailedOutSuccessfully = true; String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason; for (Node failingTenantNode : nodeRepository().list().childrenOf(node)) { if (failingTenantNode.state() == Node.State.active) { allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure); } else { nodeRepository().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure); } } if (! 
allTenantNodesFailedOutSuccessfully) return false; node = nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason); try { deployment.get().activate(); return true; } catch (RuntimeException e) { nodeRepository().reactivate(node.hostname(), Agent.NodeFailer, "Failed to redeploy after being failed by NodeFailer"); log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() + ", but redeploying without the node failed", e); return false; } } } /** Returns true if node failing should be throttled */ private boolean throttle(Node node) { if (throttlePolicy == ThrottlePolicy.disabled) return false; Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow); List<Node> nodes = nodeRepository().getNodes(); NodeList recentlyFailedNodes = nodes.stream() .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow)) .collect(collectingAndThen(Collectors.toList(), NodeList::new)); if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false; if (!node.parentHostname().isPresent() && recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false; log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(), throttlePolicy.toHumanReadableString(nodes.size()))); return true; } public enum ThrottlePolicy { hosted(Duration.ofDays(1), 0.02, 2), disabled(Duration.ZERO, 0, 0); private final Duration throttleWindow; private final double fractionAllowedToFail; private final int minimumAllowedToFail; ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) { this.throttleWindow = throttleWindow; this.fractionAllowedToFail = fractionAllowedToFail; this.minimumAllowedToFail = minimumAllowedToFail; } public int allowedToFailOf(int totalNodes) { return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail); } public String toHumanReadableString(int 
totalNodes) { return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100, allowedToFailOf(totalNodes), minimumAllowedToFail, throttleWindow); } } }
Done
/**
 * Marks the given node as wanting to retire, under the owning application's lock.
 * Does nothing if the node is gone, no longer active, owned by someone else, or already
 * marked. On lock timeout this gives up with a warning; a later run will retry.
 */
private void retireActive(String hostname, ApplicationId owner, List<String> reasons) {
    Duration lockWait = Duration.ofMinutes(1);
    try (Mutex lock = nodeRepository().lock(owner, lockWait)) {
        Optional<Node> maybeNode = nodeRepository().getNode(hostname);
        if (maybeNode.isEmpty()) return;

        // Re-check under the lock that the node is still active and still owned by this application.
        Node current = maybeNode.get();
        boolean stillOurs = current.state() == Node.State.active
                            && current.allocation().orElseThrow().owner().equals(owner);
        if (!stillOurs) return;
        if (current.status().wantToRetire()) return; // already marked

        log.info("Setting wantToRetire on " + current + " due to these reports: " + reasons);
        nodeRepository().write(current.withWantToRetire(true, Agent.NodeFailer, clock.instant()));
    } catch (ApplicationLockException e) {
        log.warning("Failed to get lock on " + owner + " within " + lockWait + " to set wantToRetire, will retry later");
    }
}
Duration lockWait = Duration.ofMinutes(1); // maximum time to wait when acquiring the application lock — presumably passed to nodeRepository().lock(owner, lockWait); TODO confirm against caller
/**
 * Marks the given node as wanting to retire, under the owning application's lock.
 * Does nothing if the node is gone, no longer active, owned by someone else, or already
 * marked. On lock timeout this gives up with a warning; a later run will retry.
 */
private void retireActive(String hostname, ApplicationId owner, List<String> reasons) {
    Duration lockWait = Duration.ofSeconds(10);
    try (Mutex lock = nodeRepository().lock(owner, lockWait)) {
        Optional<Node> maybeNode = nodeRepository().getNode(hostname);
        if (maybeNode.isEmpty()) return;

        // Re-check under the lock that the node is still active and still owned by this application.
        Node current = maybeNode.get();
        boolean stillOurs = current.state() == Node.State.active
                            && current.allocation().orElseThrow().owner().equals(owner);
        if (!stillOurs) return;
        if (current.status().wantToRetire()) return; // already marked

        log.info("Setting wantToRetire on " + current + " due to these reports: " + reasons);
        nodeRepository().write(current.withWantToRetire(true, Agent.NodeFailer, clock.instant()));
    } catch (ApplicationLockException e) {
        log.warning("Failed to get lock on " + owner + " within " + lockWait + " to set wantToRetire, will retry later");
    }
}
/**
 * Moves unhealthy nodes to the failed state and retires hosts with hardware failure reports.
 * Each maintenance run: fails unhealthy ready nodes (under the allocation lock), updates
 * up/down history for active nodes from service monitoring, fails active nodes that have been
 * down longer than the limit or have hardware failures, and retires hosts so that at most one
 * is marked wantToRetire at a time. The rate of failing is bounded by the ThrottlePolicy.
 */
class NodeFailer extends Maintainer {

    private static final Logger log = Logger.getLogger(NodeFailer.class.getName());

    // Healthy nodes are expected to request config at least this often.
    private static final Duration nodeRequestInterval = Duration.ofMinutes(10);

    /** Metric for number of nodes that we want to fail, but cannot due to throttling */
    public static final String throttledNodeFailuresMetric = "throttledNodeFailures";

    /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
    public static final String throttlingActiveMetric = "nodeFailThrottling";

    /** Provides information about the status of ready hosts */
    private final HostLivenessTracker hostLivenessTracker;

    /** Provides (more accurate) information about the status of active hosts */
    private final ServiceMonitor serviceMonitor;

    private final Deployer deployer;
    private final Duration downTimeLimit;
    private final Clock clock;
    private final Orchestrator orchestrator;
    // Used to suppress "no config requests" failures shortly after this failer starts.
    private final Instant constructionTime;
    private final ThrottlePolicy throttlePolicy;
    private final Metric metric;
    private final ConfigserverConfig configserverConfig;

    public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
                      ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
                      Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
                      ThrottlePolicy throttlePolicy, Metric metric, JobControl jobControl,
                      ConfigserverConfig configserverConfig) {
        // Run at least twice within the down time limit, but no less often than every 5 minutes.
        super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl);
        this.deployer = deployer;
        this.hostLivenessTracker = hostLivenessTracker;
        this.serviceMonitor = serviceMonitor;
        this.downTimeLimit = downTimeLimit;
        this.clock = clock;
        this.orchestrator = orchestrator;
        this.constructionTime = clock.instant();
        this.throttlePolicy = throttlePolicy;
        this.metric = metric;
        this.configserverConfig = configserverConfig;
    }

    @Override
    protected void maintain() {
        int throttledNodeFailures = 0;

        // Ready nodes: fail the unhealthy ones while holding the allocation lock.
        try (Mutex lock = nodeRepository().lockAllocation()) {
            updateNodeLivenessEventsForReadyNodes();
            for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
                Node node = entry.getKey();
                if (throttle(node)) {
                    throttledNodeFailures++;
                    continue;
                }
                String reason = entry.getValue();
                nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason);
            }
        }

        updateNodeDownState();
        List<Node> activeNodes = nodeRepository().getNodes(Node.State.active);

        // Active nodes: fail those down past the limit or with hardware failures.
        Set<Node> nodesWithFailureReason = new HashSet<>();
        for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason(activeNodes).entrySet()) {
            Node node = entry.getKey();
            nodesWithFailureReason.add(node);
            if (!failAllowedFor(node.type())) {
                continue;
            }
            if (throttle(node)) {
                throttledNodeFailures++;
                continue;
            }
            String reason = entry.getValue();
            failActive(node, reason);
        }

        // Retire hosts with hardware failure reports (including their children), but only up to
        // maxWantToRetireHosts hosts marked wantToRetire at any time.
        final long maxWantToRetireHosts = 1;
        List<Node> candidateNodes = activeNodes.stream()
                .filter(node -> failAllowedFor(node.type()))
                .filter(node -> !nodesWithFailureReason.contains(node))
                .filter(node -> node.parentHostname().isEmpty()) // hosts only
                .collect(Collectors.toList());
        // Hosts already retiring count against the budget.
        long currentWantToRetireHosts = candidateNodes.stream().filter(node -> node.status().wantToRetire()).count();
        for (int i = 0; i < candidateNodes.size() && currentWantToRetireHosts < maxWantToRetireHosts; ++i) {
            Node node = candidateNodes.get(i);
            List<String> reasons = reasonsToRetireActiveParentHost(node);
            if (reasons.size() > 0) {
                retireRecursively(node, reasons, activeNodes);
                ++currentWantToRetireHosts;
            }
        }

        // 1 if any failure was throttled this run, 0 otherwise.
        metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null);
        metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
    }

    /** Records a "requested" history event for each ready node which has requested config since last noted. */
    private void updateNodeLivenessEventsForReadyNodes() {
        for (Node node : nodeRepository().getNodes(Node.State.ready)) {
            Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
            if ( ! lastLocalRequest.isPresent()) continue;
            if ( ! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) {
                History updatedHistory = node.history()
                        .with(new History.Event(History.Event.Type.requested, Agent.NodeFailer, lastLocalRequest.get()));
                nodeRepository().write(node.with(updatedHistory));
            }
        }
    }

    /** Returns the ready nodes that should be failed, mapped to the human-readable reason. */
    private Map<Node, String> getReadyNodesByFailureReason() {
        // Right after construction we have no request history, so accept any request time.
        Instant oldestAcceptableRequestTime =
                constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ?
                        Instant.EPOCH :
                        clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);
        Map<Node, String> nodesByFailureReason = new HashMap<>();
        for (Node node : nodeRepository().getNodes(Node.State.ready)) {
            if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
                nodesByFailureReason.put(node, "Not receiving config requests from node");
            } else if (node.status().hardwareFailureDescription().isPresent()) {
                nodesByFailureReason.put(node, "Node has hardware failure");
            } else if (node.status().hardwareDivergence().isPresent()) {
                nodesByFailureReason.put(node, "Node has hardware divergence");
            } else {
                // A child node also fails when its parent host has failure reports.
                Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().getNode(parent)).orElse(node);
                List<String> failureReports = reasonsToRetireActiveParentHost(hostNode);
                if (failureReports.size() > 0) {
                    if (hostNode.equals(node)) {
                        nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
                    } else {
                        nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
                    }
                }
            }
        }
        return nodesByFailureReason;
    }

    /**
     * If a node looks down according to service monitoring (see {@link #badNode(List)}),
     * record it as down; otherwise we remove any "down" history record.
     */
    private void updateNodeDownState() {
        Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream()
                .collect(Collectors.toMap(Node::hostname, node -> node));
        serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName()
                .forEach((hostName, serviceInstances) -> {
                    Node node = activeNodesByHostname.get(hostName.s());
                    if (node == null) return;
                    if (badNode(serviceInstances)) {
                        recordAsDown(node);
                    } else {
                        clearDownRecord(node);
                    }
                });
    }

    /** Returns the active nodes that should be failed, mapped to the human-readable reason. */
    private Map<Node, String> getActiveNodesByFailureReason(List<Node> activeNodes) {
        Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
        Map<Node, String> nodesByFailureReason = new HashMap<>();
        for (Node node : activeNodes) {
            if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
                nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
            } else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) {
                nodesByFailureReason.put(node, "Node has hardware failure: " + node.status().hardwareFailureDescription().get());
            }
        }
        return nodesByFailureReason;
    }

    /** Returns descriptions of the hardware failure reports present on the given host, if any. */
    private static List<String> reasonsToRetireActiveParentHost(Node hostNode) {
        return Stream.of(
                "badTotalMemorySize",
                "badTotalDiskSize",
                "badDiskType",
                "badInterfaceSpeed",
                "badCpuCount"
        )
                .map(reportId -> baseReportToString(hostNode, reportId))
                .flatMap(Optional::stream)
                .collect(Collectors.toList());
    }

    /** The generated string is built from the report's ID, created time, and description only. */
    static Optional<String> baseReportToString(Node node, String reportId) {
        return node.reports().getReport(reportId)
                .map(report -> reportId + " reported " + report.getCreatedTime() + ": " + report.getDescription());
    }

    /**
     * There are reasons why this node should be parked, and we'd like to do it through retiring,
     * including any child nodes.
     *
     * @param activeNodesIfMaybeParent the active nodes used to find children of this node,
     *                                 or null when the node is known to be a child
     */
    private void retireRecursively(Node node, List<String> reasons, List<Node> activeNodesIfMaybeParent) {
        if (activeNodesIfMaybeParent != null) {
            List<Node> childNodesToRetire = activeNodesIfMaybeParent.stream()
                    .filter(n -> n.parentHostname().equals(Optional.of(node.hostname())))
                    .collect(Collectors.toList());
            for (Node childNode : childNodesToRetire) {
                retireRecursively(childNode, reasons, null);
            }
        }
        if (node.status().wantToRetire()) return; // already marked
        retireActive(node.hostname(), node.allocation().get().owner(), reasons);
    }

    /** Returns whether node has any kind of hardware issue */
    public static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) {
        if (node.status().hardwareFailureDescription().isPresent() || node.status().hardwareDivergence().isPresent()) {
            return true;
        }
        // A node also has an issue when its parent host has failure reports.
        Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.getNode(parent)).orElse(node);
        return reasonsToRetireActiveParentHost(hostNode).size() > 0;
    }

    /** Docker hosts only send config requests when node admin runs in a container. */
    private boolean expectConfigRequests(Node node) {
        return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer();
    }

    private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
        return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
    }

    private boolean wasMadeReadyBefore(Node node, Instant instant) {
        return node.history().hasEventBefore(History.Event.Type.readied, instant);
    }

    private boolean hasRecordedRequestAfter(Node node, Instant instant) {
        return node.history().hasEventAfter(History.Event.Type.requested, instant);
    }

    /** Returns whether the node's owning application is currently allowed to be down. */
    private boolean applicationSuspended(Node node) {
        try {
            return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
                   == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
        } catch (ApplicationIdNotFoundException e) {
            // Treat unknown applications as not suspended.
            return false;
        }
    }

    /** Returns whether the node itself is currently allowed to be down. */
    private boolean nodeSuspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN;
        } catch (HostNameNotFoundException e) {
            // Treat unknown hosts as not suspended.
            return false;
        }
    }

    /**
     * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
     * unless the node is replaced.
     * However, nodes of other types are not replaced (because all of the type are used by a single application),
     * so we only allow one to be in failed at any point in time to protect against runaway failing.
     */
    private boolean failAllowedFor(NodeType nodeType) {
        if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
        return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
    }

    /**
     * Returns true if the node is considered bad: all monitored services are down.
     * If a node remains bad for a long time, the NodeFailer will eventually try to fail the node.
     */
    public static boolean badNode(List<ServiceInstance> services) {
        Map<ServiceStatus, Long> countsByStatus = services.stream()
                .collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting()));
        return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L &&
               countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L;
    }

    /**
     * Record a node as down if not already recorded and returns the node in the new state.
     * This assumes the node is found in the node repo and that the node is allocated.
     * If we get here otherwise something is truly odd.
     */
    private Node recordAsDown(Node node) {
        if (node.history().event(History.Event.Type.down).isPresent()) return node; // already down
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            // Re-read under the application lock before writing.
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
            return nodeRepository().write(node.downAt(clock.instant()));
        }
    }

    /** Removes any "down" history record from the node, under the application lock. */
    private void clearDownRecord(Node node) {
        if ( ! node.history().event(History.Event.Type.down).isPresent()) return;
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get();
            nodeRepository().write(node.up());
        }
    }

    /**
     * Called when a node should be moved to the failed state: Do that if it seems safe,
     * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
     * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
     *
     * @return whether node was successfully failed
     */
    private boolean failActive(Node node, String reason) {
        Optional<Deployment> deployment =
                deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
        if ( ! deployment.isPresent()) return false;
        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            // Fail all active child nodes first; give up if any of them cannot be failed.
            boolean allTenantNodesFailedOutSuccessfully = true;
            String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
            for (Node failingTenantNode : nodeRepository().list().childrenOf(node)) {
                if (failingTenantNode.state() == Node.State.active) {
                    allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
                } else {
                    nodeRepository().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
                }
            }
            if ( ! allTenantNodesFailedOutSuccessfully) return false;

            node = nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason);
            try {
                deployment.get().activate();
                return true;
            } catch (RuntimeException e) {
                // Redeployment without the failed node did not work: roll back the failing.
                nodeRepository().reactivate(node.hostname(), Agent.NodeFailer,
                                            "Failed to redeploy after being failed by NodeFailer");
                log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
                                       ", but redeploying without the node failed", e);
                return false;
            }
        }
    }

    /** Returns true if node failing should be throttled */
    private boolean throttle(Node node) {
        if (throttlePolicy == ThrottlePolicy.disabled) return false;
        Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
        List<Node> nodes = nodeRepository().getNodes();
        NodeList recentlyFailedNodes = nodes.stream()
                .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
                .collect(collectingAndThen(Collectors.toList(), NodeList::new));
        if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
        // Allow failing a parent host if fewer than the minimum of recent failures were hosts.
        if (!node.parentHostname().isPresent() &&
            recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
        log.info(String.format("Want to fail node %s, but throttling is in effect: %s",
                               node.hostname(), throttlePolicy.toHumanReadableString(nodes.size())));
        return true;
    }

    /** Bounds the fraction (or absolute number) of nodes that may be failed within a time window. */
    public enum ThrottlePolicy {

        hosted(Duration.ofDays(1), 0.02, 2),
        disabled(Duration.ZERO, 0, 0);

        private final Duration throttleWindow;
        private final double fractionAllowedToFail;
        private final int minimumAllowedToFail;

        ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
            this.throttleWindow = throttleWindow;
            this.fractionAllowedToFail = fractionAllowedToFail;
            this.minimumAllowedToFail = minimumAllowedToFail;
        }

        /** Returns the number of nodes allowed to fail within the window, given the total node count. */
        public int allowedToFailOf(int totalNodes) {
            return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
        }

        public String toHumanReadableString(int totalNodes) {
            return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s",
                                 fractionAllowedToFail*100, allowedToFailOf(totalNodes),
                                 minimumAllowedToFail, throttleWindow);
        }
    }
}
class NodeFailer extends Maintainer { private static final Logger log = Logger.getLogger(NodeFailer.class.getName()); private static final Duration nodeRequestInterval = Duration.ofMinutes(10); /** Metric for number of nodes that we want to fail, but cannot due to throttling */ public static final String throttledNodeFailuresMetric = "throttledNodeFailures"; /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */ public static final String throttlingActiveMetric = "nodeFailThrottling"; /** Provides information about the status of ready hosts */ private final HostLivenessTracker hostLivenessTracker; /** Provides (more accurate) information about the status of active hosts */ private final ServiceMonitor serviceMonitor; private final Deployer deployer; private final Duration downTimeLimit; private final Clock clock; private final Orchestrator orchestrator; private final Instant constructionTime; private final ThrottlePolicy throttlePolicy; private final Metric metric; private final ConfigserverConfig configserverConfig; public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker, ServiceMonitor serviceMonitor, NodeRepository nodeRepository, Duration downTimeLimit, Clock clock, Orchestrator orchestrator, ThrottlePolicy throttlePolicy, Metric metric, JobControl jobControl, ConfigserverConfig configserverConfig) { super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl); this.deployer = deployer; this.hostLivenessTracker = hostLivenessTracker; this.serviceMonitor = serviceMonitor; this.downTimeLimit = downTimeLimit; this.clock = clock; this.orchestrator = orchestrator; this.constructionTime = clock.instant(); this.throttlePolicy = throttlePolicy; this.metric = metric; this.configserverConfig = configserverConfig; } @Override protected void maintain() { int throttledNodeFailures = 0; try (Mutex lock = nodeRepository().lockAllocation()) { updateNodeLivenessEventsForReadyNodes(); 
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) { Node node = entry.getKey(); if (throttle(node)) { throttledNodeFailures++; continue; } String reason = entry.getValue(); nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason); } } updateNodeDownState(); List<Node> activeNodes = nodeRepository().getNodes(Node.State.active); Set<Node> nodesWithFailureReason = new HashSet<>(); for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason(activeNodes).entrySet()) { Node node = entry.getKey(); nodesWithFailureReason.add(node); if (!failAllowedFor(node.type())) { continue; } if (throttle(node)) { throttledNodeFailures++; continue; } String reason = entry.getValue(); failActive(node, reason); } activeNodes.stream() .filter(node -> failAllowedFor(node.type())) .filter(node -> !nodesWithFailureReason.contains(node)) .filter(node -> node.parentHostname().isEmpty()) .sorted(Comparator.comparing(node -> node.status().wantToRetire(), Comparator.reverseOrder())) .filter(node -> { if (node.status().wantToRetire()) return true; if (node.allocation().map(a -> a.membership().retired()).orElse(false)) return true; List<String> reasons = reasonsToRetireActiveParentHost(node); if (reasons.size() > 0) { retireRecursively(node, reasons, activeNodes); return true; } return false; }) .limit(1) .count(); metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null); metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null); } private void updateNodeLivenessEventsForReadyNodes() { for (Node node : nodeRepository().getNodes(Node.State.ready)) { Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname()); if ( ! lastLocalRequest.isPresent()) continue; if (! 
node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) { History updatedHistory = node.history() .with(new History.Event(History.Event.Type.requested, Agent.NodeFailer, lastLocalRequest.get())); nodeRepository().write(node.with(updatedHistory)); } } } private Map<Node, String> getReadyNodesByFailureReason() { Instant oldestAcceptableRequestTime = constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ? Instant.EPOCH : clock.instant().minus(downTimeLimit).minus(nodeRequestInterval); Map<Node, String> nodesByFailureReason = new HashMap<>(); for (Node node : nodeRepository().getNodes(Node.State.ready)) { if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) { nodesByFailureReason.put(node, "Not receiving config requests from node"); } else if (node.status().hardwareFailureDescription().isPresent()) { nodesByFailureReason.put(node, "Node has hardware failure"); } else if (node.status().hardwareDivergence().isPresent()) { nodesByFailureReason.put(node, "Node has hardware divergence"); } else { Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().getNode(parent)).orElse(node); List<String> failureReports = reasonsToRetireActiveParentHost(hostNode); if (failureReports.size() > 0) { if (hostNode.equals(node)) { nodesByFailureReason.put(node, "Host has failure reports: " + failureReports); } else { nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports); } } } } return nodesByFailureReason; } /** * If the node is down (see {@link * Otherwise we remove any "down" history record. 
*/ private void updateNodeDownState() { Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream() .collect(Collectors.toMap(Node::hostname, node -> node)); serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName() .forEach((hostName, serviceInstances) -> { Node node = activeNodesByHostname.get(hostName.s()); if (node == null) return; if (badNode(serviceInstances)) { recordAsDown(node); } else { clearDownRecord(node); } }); } private Map<Node, String> getActiveNodesByFailureReason(List<Node> activeNodes) { Instant graceTimeEnd = clock.instant().minus(downTimeLimit); Map<Node, String> nodesByFailureReason = new HashMap<>(); for (Node node : activeNodes) { if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) { nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit); } else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) { nodesByFailureReason.put(node, "Node has hardware failure: " + node.status().hardwareFailureDescription().get()); } } return nodesByFailureReason; } private static List<String> reasonsToRetireActiveParentHost(Node hostNode) { return Stream.of( "badTotalMemorySize", "badTotalDiskSize", "badDiskType", "badInterfaceSpeed", "badCpuCount" ) .map(reportId -> baseReportToString(hostNode, reportId)) .flatMap(Optional::stream) .collect(Collectors.toList()); } /** The generated string is built from the report's ID, created time, and description only. */ static Optional<String> baseReportToString(Node node, String reportId) { return node.reports().getReport(reportId).map(report -> reportId + " reported " + report.getCreatedTime() + ": " + report.getDescription()); } /** * There are reasons why this node should be parked, and we'd like to do it through retiring, * including any child nodes. 
*/ private void retireRecursively(Node node, List<String> reasons, List<Node> activeNodes) { if (activeNodes != null) { List<Node> childNodesToRetire = activeNodes.stream() .filter(n -> n.parentHostname().equals(Optional.of(node.hostname()))) .collect(Collectors.toList()); for (Node childNode : childNodesToRetire) { retireRecursively(childNode, reasons, null); } } if (node.status().wantToRetire()) return; retireActive(node.hostname(), node.allocation().get().owner(), reasons); } /** Returns whether node has any kind of hardware issue */ public static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) { if (node.status().hardwareFailureDescription().isPresent() || node.status().hardwareDivergence().isPresent()) { return true; } Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.getNode(parent)).orElse(node); return reasonsToRetireActiveParentHost(hostNode).size() > 0; } private boolean expectConfigRequests(Node node) { return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer(); } private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) { return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant); } private boolean wasMadeReadyBefore(Node node, Instant instant) { return node.history().hasEventBefore(History.Event.Type.readied, instant); } private boolean hasRecordedRequestAfter(Node node, Instant instant) { return node.history().hasEventAfter(History.Event.Type.requested, instant); } private boolean applicationSuspended(Node node) { try { return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner()) == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (ApplicationIdNotFoundException e) { return false; } } private boolean nodeSuspended(Node node) { try { return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN; } catch (HostNameNotFoundException e) { return false; } } /** * We can attempt to fail any number 
of *tenant* and *host* nodes because the operation will not be effected * unless the node is replaced. * However, nodes of other types are not replaced (because all of the type are used by a single application), * so we only allow one to be in failed at any point in time to protect against runaway failing. */ private boolean failAllowedFor(NodeType nodeType) { if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true; return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0; } /** * Returns true if the node is considered bad: all monitored services services are down. * If a node remains bad for a long time, the NodeFailer will eventually try to fail the node. */ public static boolean badNode(List<ServiceInstance> services) { Map<ServiceStatus, Long> countsByStatus = services.stream() .collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting())); return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L && countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L; } /** * Record a node as down if not already recorded and returns the node in the new state. * This assumes the node is found in the node * repo and that the node is allocated. If we get here otherwise something is truly odd. */ private Node recordAsDown(Node node) { if (node.history().event(History.Event.Type.down).isPresent()) return node; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); return nodeRepository().write(node.downAt(clock.instant())); } } private void clearDownRecord(Node node) { if ( ! 
node.history().event(History.Event.Type.down).isPresent()) return; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); nodeRepository().write(node.up()); } } /** * Called when a node should be moved to the failed state: Do that if it seems safe, * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host). * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken. * * @return whether node was successfully failed */ private boolean failActive(Node node, String reason) { Optional<Deployment> deployment = deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30)); if ( ! deployment.isPresent()) return false; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { boolean allTenantNodesFailedOutSuccessfully = true; String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason; for (Node failingTenantNode : nodeRepository().list().childrenOf(node)) { if (failingTenantNode.state() == Node.State.active) { allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure); } else { nodeRepository().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure); } } if (! 
allTenantNodesFailedOutSuccessfully) return false; node = nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason); try { deployment.get().activate(); return true; } catch (RuntimeException e) { nodeRepository().reactivate(node.hostname(), Agent.NodeFailer, "Failed to redeploy after being failed by NodeFailer"); log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() + ", but redeploying without the node failed", e); return false; } } } /** Returns true if node failing should be throttled */ private boolean throttle(Node node) { if (throttlePolicy == ThrottlePolicy.disabled) return false; Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow); List<Node> nodes = nodeRepository().getNodes(); NodeList recentlyFailedNodes = nodes.stream() .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow)) .collect(collectingAndThen(Collectors.toList(), NodeList::new)); if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false; if (!node.parentHostname().isPresent() && recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false; log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(), throttlePolicy.toHumanReadableString(nodes.size()))); return true; } public enum ThrottlePolicy { hosted(Duration.ofDays(1), 0.02, 2), disabled(Duration.ZERO, 0, 0); private final Duration throttleWindow; private final double fractionAllowedToFail; private final int minimumAllowedToFail; ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) { this.throttleWindow = throttleWindow; this.fractionAllowedToFail = fractionAllowedToFail; this.minimumAllowedToFail = minimumAllowedToFail; } public int allowedToFailOf(int totalNodes) { return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail); } public String toHumanReadableString(int 
totalNodes) { return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100, allowedToFailOf(totalNodes), minimumAllowedToFail, throttleWindow); } } }
return elements[1]; double space
/**
 * Extracts the instance id from an instance profile key of the form
 * "Instance ${instance_id} (host=$host $port)" (four space-separated tokens).
 *
 * @throws IllegalStateException if the key does not have the expected shape
 *         (same exception type Preconditions.checkState(false) threw, now with a message
 *         instead of a bare failure followed by unreachable code)
 */
private String parseInstanceId(String str) {
    final String[] elements = str.split(" ");
    if (elements.length != 4) {
        throw new IllegalStateException("Unexpected instance profile key: " + str);
    }
    return elements[1];
}
return elements[1];
/**
 * Parses the instance id out of an instance profile key, which is expected to look like
 * "Instance ${instance_id} (host=$host $port)", i.e. four space-separated tokens.
 */
private String parseInstanceId(String str) {
    final String[] tokens = str.split(" ");
    if (tokens.length != 4) {
        // Unexpected key shape: fail the state check, keeping the (unreachable) fallback.
        Preconditions.checkState(false);
        return "";
    }
    return tokens[1];
}
/**
 * Provides CPU/IO consumption information for currently running queries by asking backends to
 * report runtime profiles and then reading counters from the coordinator's RuntimeProfile tree.
 */
class CurrentQueryInfoProvider {
    private static final Logger LOG = LogManager.getLogger(CurrentQueryInfoProvider.class);

    public CurrentQueryInfoProvider() {
    }

    /**
     * Firstly send request to trigger report profile for specified query and wait a while,
     * Secondly get Counters from Coordinator's RuntimeProfile and return query's consumption.
     *
     * @param item the query to inspect
     * @return the query's aggregated consumption
     * @throws AnalysisException if triggering the profile report fails
     */
    public Consumption getQueryConsumption(QueryStatisticsItem item) throws AnalysisException {
        triggerReportAndWait(item, getWaitTime(1), false);
        return new Consumption(item.getQueryProfile());
    }

    /**
     * Same as getQueryConsumption, but this will cause BE to report all queries profile.
     *
     * @param items the queries to inspect
     * @return query id mapped to the query's aggregated consumption
     * @throws AnalysisException if triggering the profile report fails
     */
    public Map<String, Consumption> getQueriesConsumptions(Collection<QueryStatisticsItem> items)
            throws AnalysisException {
        triggerReportAndWait(items, getWaitTime(items.size()), true);
        final Map<String, Consumption> queryConsumptions = Maps.newHashMap();
        for (QueryStatisticsItem item : items) {
            queryConsumptions.put(item.getQueryId(), new Consumption(item.getQueryProfile()));
        }
        return queryConsumptions;
    }

    /**
     * Return query's instances consumption.
     *
     * @param item the query to inspect
     * @return per-fragment-instance consumption
     * @throws AnalysisException if triggering the profile report fails
     */
    public Collection<InstanceConsumption> getQueryInstancesConsumptions(QueryStatisticsItem item)
            throws AnalysisException {
        triggerReportAndWait(item, getWaitTime(1), false);
        final Map<String, RuntimeProfile> instanceProfiles = collectInstanceProfile(item.getQueryProfile());
        final List<InstanceConsumption> instanceConsumptions = Lists.newArrayList();
        for (QueryStatisticsItem.FragmentInstanceInfo instanceInfo : item.getFragmentInstanceInfos()) {
            final RuntimeProfile instanceProfile =
                    instanceProfiles.get(DebugUtil.printId(instanceInfo.getInstanceId()));
            Preconditions.checkNotNull(instanceProfile);
            final InstanceConsumption consumption = new InstanceConsumption(
                    instanceInfo.getFragmentId(),
                    instanceInfo.getInstanceId(),
                    instanceInfo.getAddress(),
                    instanceProfile);
            instanceConsumptions.add(consumption);
        }
        return instanceConsumptions;
    }

    /**
     * Profile trees is query profile -> fragment profile -> instance profile ....
     *
     * @param queryProfile the root query profile
     * @return instance id mapped to the instance's profile
     */
    private Map<String, RuntimeProfile> collectInstanceProfile(RuntimeProfile queryProfile) {
        final Map<String, RuntimeProfile> instanceProfiles = Maps.newHashMap();
        for (RuntimeProfile fragmentProfile : queryProfile.getChildMap().values()) {
            for (Map.Entry<String, RuntimeProfile> entry : fragmentProfile.getChildMap().entrySet()) {
                Preconditions.checkState(
                        instanceProfiles.put(parseInstanceId(entry.getKey()), entry.getValue()) == null);
            }
        }
        return instanceProfiles;
    }

    /**
     * Instance profile key is "Instance ${instance_id} (host=$host $port)"
     * (Restored: this method is called by collectInstanceProfile but its body was missing
     * from this span of the file, leaving its javadoc orphaned on getWaitTime.)
     *
     * @param str the instance profile key
     * @return the instance id token
     */
    private String parseInstanceId(String str) {
        final String[] elements = str.split(" ");
        if (elements.length == 4) {
            return elements[1];
        } else {
            Preconditions.checkState(false);
            return "";
        }
    }

    /**
     * Waiting time grows with the number of queries, capped at 2 seconds.
     *
     * @return waiting time in ms
     */
    private long getWaitTime(int numOfQuery) {
        final int oneQueryWaitTime = 200;
        final int allQueryMaxWaitTime = 2000;
        return Math.min((long) numOfQuery * oneQueryWaitTime, allQueryMaxWaitTime);
    }

    private void triggerReportAndWait(QueryStatisticsItem item, long waitTime, boolean allQuery)
            throws AnalysisException {
        final List<QueryStatisticsItem> items = Lists.newArrayList(item);
        triggerReportAndWait(items, waitTime, allQuery);
    }

    private void triggerReportAndWait(Collection<QueryStatisticsItem> items, long waitTime, boolean allQuery)
            throws AnalysisException {
        triggerReportProfile(items, allQuery);
        try {
            // Fix: Thread.sleep is static; invoking it via currentThread() was misleading.
            Thread.sleep(waitTime);
        } catch (InterruptedException e) {
            // Fix: restore the interrupt flag instead of silently swallowing it.
            Thread.currentThread().interrupt();
        }
    }

    /**
     * send report profile request.
     *
     * @param items the queries whose profiles should be reported
     * @param allQuery true:all queries profile will be reported, false:specified queries profile will be reported.
     * @throws AnalysisException if a backend address cannot be resolved or the RPC fails
     */
    private void triggerReportProfile(Collection<QueryStatisticsItem> items, boolean allQuery)
            throws AnalysisException {
        final Map<TNetworkAddress, Request> requestMap = Maps.newHashMap();
        // Cache BE address -> BRPC address so each backend is resolved only once.
        final Map<TNetworkAddress, TNetworkAddress> brpcAddressMap = Maps.newHashMap();
        for (QueryStatisticsItem item : items) {
            for (QueryStatisticsItem.FragmentInstanceInfo instanceInfo : item.getFragmentInstanceInfos()) {
                TNetworkAddress brpcNetAddress = brpcAddressMap.get(instanceInfo.getAddress());
                if (brpcNetAddress == null) {
                    try {
                        brpcNetAddress = toBrpcHost(instanceInfo.getAddress());
                        brpcAddressMap.put(instanceInfo.getAddress(), brpcNetAddress);
                    } catch (Exception e) {
                        LOG.warn(e.getMessage());
                        throw new AnalysisException(e.getMessage());
                    }
                }
                Request request = requestMap.get(brpcNetAddress);
                if (request == null) {
                    request = new Request(brpcNetAddress);
                    requestMap.put(brpcNetAddress, request);
                }
                // For all-query reporting the instance id list is left empty.
                if (!allQuery) {
                    final PUniqueId pUId = new PUniqueId(instanceInfo.getInstanceId());
                    request.addInstanceId(pUId);
                }
            }
        }
        recvResponse(sendRequest(requestMap));
    }

    /** Sends one async report-profile RPC per backend; returns the pending futures. */
    private List<Pair<Request, Future<PTiggerReportProfileResult>>> sendRequest(
            Map<TNetworkAddress, Request> requestMap) throws AnalysisException {
        final List<Pair<Request, Future<PTiggerReportProfileResult>>> futures = Lists.newArrayList();
        for (TNetworkAddress address : requestMap.keySet()) {
            final Request request = requestMap.get(address);
            final PTiggerReportProfileRequest pbRequest =
                    new PTiggerReportProfileRequest(request.getInstanceIds());
            try {
                futures.add(Pair.create(request, BackendServiceProxy.getInstance()
                        .triggerReportProfileAsync(address, pbRequest)));
            } catch (RpcException e) {
                throw new AnalysisException("Sending request fails for query's execution information.");
            }
        }
        return futures;
    }

    /** Waits for every report-profile RPC and converts any failure into an AnalysisException. */
    private void recvResponse(List<Pair<Request, Future<PTiggerReportProfileResult>>> futures)
            throws AnalysisException {
        final String reasonPrefix = "Fail to receive result.";
        for (Pair<Request, Future<PTiggerReportProfileResult>> pair : futures) {
            try {
                final PTiggerReportProfileResult result = pair.second.get(10, TimeUnit.SECONDS);
                final TStatusCode code = TStatusCode.findByValue(result.status.code);
                if (code != TStatusCode.OK) {
                    String errMsg = "";
                    if (result.status.msgs != null && !result.status.msgs.isEmpty()) {
                        errMsg = result.status.msgs.get(0);
                    }
                    throw new AnalysisException(reasonPrefix + " backend:" + pair.first.getAddress()
                            + " reason:" + errMsg);
                }
            } catch (InterruptedException | ExecutionException | TimeoutException e) {
                // Fix: preserve the interrupt flag when the wait was interrupted.
                if (e instanceof InterruptedException) {
                    Thread.currentThread().interrupt();
                }
                LOG.warn(reasonPrefix + " reason:" + e.getCause());
                throw new AnalysisException(reasonPrefix);
            }
        }
    }

    /** Resolves a backend's heartbeat address to its BRPC address. */
    private TNetworkAddress toBrpcHost(TNetworkAddress host) throws AnalysisException {
        final Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort(
                host.getHostname(), host.getPort());
        if (backend == null) {
            throw new AnalysisException(
                    "Backend " + host.getHostname() + ":" + host.getPort() + " does not exist");
        }
        if (backend.getBrpcPort() < 0) {
            // Fix: error message was garbled ("BRPC port is't exist.")
            throw new AnalysisException("BRPC port doesn't exist.");
        }
        return new TNetworkAddress(backend.getHost(), backend.getBrpcPort());
    }

    /** Aggregated CPU/IO consumption derived from a RuntimeProfile's exec-node counters. */
    public static class Consumption {
        private final static String OLAP_SCAN_NODE = "OLAP_SCAN_NODE";
        private final static String HASH_JOIN_NODE = "HASH_JOIN_NODE";
        private final static String HASH_AGGREGATION_NODE = "AGGREGATION_NODE";
        private final static String SORT_NODE = "SORT_NODE";
        private final static String ANALYTIC_EVAL_NODE = "ANALYTIC_EVAL_NODE";
        private final static String UNION_NODE = "UNION_NODE";
        private final static String EXCHANGE_NODE = "EXCHANGE_NODE";

        protected final List<ConsumptionCalculator> calculators;

        public Consumption(RuntimeProfile profile) {
            this.calculators = Lists.newArrayList();
            init(profile);
        }

        /** Registers one calculator per exec-node type, fed with that node type's counter maps. */
        private void init(RuntimeProfile profile) {
            calculators.add(new OlapScanNodeConsumptionCalculator(collectCounters(profile, OLAP_SCAN_NODE)));
            calculators.add(new HashJoinConsumptionCalculator(collectCounters(profile, HASH_JOIN_NODE)));
            calculators.add(new HashAggConsumptionCalculator(collectCounters(profile, HASH_AGGREGATION_NODE)));
            calculators.add(new SortConsumptionCalculator(collectCounters(profile, SORT_NODE)));
            calculators.add(new WindowsConsumptionCalculator(collectCounters(profile, ANALYTIC_EVAL_NODE)));
            calculators.add(new UnionConsumptionCalculator(collectCounters(profile, UNION_NODE)));
            calculators.add(new ExchangeConsumptionCalculator(collectCounters(profile, EXCHANGE_NODE)));
        }

        /** Collects the counter maps of all exec nodes with the given type name (dedup of init). */
        private List<Map<String, Counter>> collectCounters(RuntimeProfile profile, String name) {
            final List<Map<String, Counter>> counterMaps = Lists.newArrayList();
            collectNodeProfileCounters(profile, counterMaps, name);
            return counterMaps;
        }

        /** Recursively collects counter maps of child profiles whose name matches the given node type. */
        private void collectNodeProfileCounters(RuntimeProfile profile,
                                                List<Map<String, Counter>> counterMaps, String name) {
            for (Map.Entry<String, RuntimeProfile> entry : profile.getChildMap().entrySet()) {
                if (name.equals(parsePossibleExecNodeName(entry.getKey()))) {
                    counterMaps.add(entry.getValue().getCounterMap());
                }
                collectNodeProfileCounters(entry.getValue(), counterMaps, name);
            }
        }

        /**
         * ExecNode's RuntimeProfile name is "$node_type_name (id=?)"
         *
         * @param str the profile name
         * @return the node type name, or "" if the name doesn't match the expected shape
         */
        private String parsePossibleExecNodeName(String str) {
            final String[] elements = str.split(" ");
            if (elements.length == 2) {
                return elements[0];
            } else {
                return "";
            }
        }

        // NOTE: public method names below are misspelled but kept for caller compatibility.
        public long getTotalCpuConsumpation() {
            long cpu = 0;
            for (ConsumptionCalculator calculator : calculators) {
                cpu += calculator.getCpu();
            }
            return cpu;
        }

        public long getTotalIoConsumpation() {
            long io = 0;
            for (ConsumptionCalculator calculator : calculators) {
                io += calculator.getIo();
            }
            return io;
        }
    }

    /** Consumption of a single fragment instance, tagged with its fragment, instance id and address. */
    public static class InstanceConsumption extends Consumption {
        private final String fragmentId;
        private final TUniqueId instanceId;
        private final TNetworkAddress address;

        public InstanceConsumption(
                String fragmentId,
                TUniqueId instanceId,
                TNetworkAddress address,
                RuntimeProfile profile) {
            super(profile);
            this.fragmentId = fragmentId;
            this.instanceId = instanceId;
            this.address = address;
        }

        public String getFragmentId() {
            return fragmentId;
        }

        public TUniqueId getInstanceId() {
            return instanceId;
        }

        public TNetworkAddress getAddress() {
            return address;
        }
    }

    /**
     * Sums CPU (row-based) and IO (byte-based) estimates over a set of counter maps.
     * Subclasses override the per-map hooks for their node type's counters.
     */
    private static abstract class ConsumptionCalculator {
        protected final List<Map<String, Counter>> counterMaps;

        public ConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            this.counterMaps = counterMaps;
        }

        public long getCpu() {
            long cpu = 0;
            for (Map<String, Counter> counterMap : counterMaps) {
                cpu += getCpuByRows(counterMap);
            }
            return cpu;
        }

        public long getIo() {
            long io = 0;
            for (Map<String, Counter> counterMap : counterMaps) {
                io += getIoByByte(counterMap);
            }
            return io;
        }

        protected long getCpuByRows(Map<String, Counter> counterMap) {
            return 0;
        }

        protected long getIoByByte(Map<String, Counter> counterMap) {
            return 0;
        }
    }

    private static class OlapScanNodeConsumptionCalculator extends ConsumptionCalculator {
        public OlapScanNodeConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getIoByByte(Map<String, Counter> counterMap) {
            final Counter counter = counterMap.get("CompressedBytesRead");
            return counter == null ? 0 : counter.getValue();
        }
    }

    private static class HashJoinConsumptionCalculator extends ConsumptionCalculator {
        public HashJoinConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getCpuByRows(Map<String, Counter> counterMap) {
            final Counter probeCounter = counterMap.get("ProbeRows");
            final Counter buildCounter = counterMap.get("BuildRows");
            return probeCounter == null || buildCounter == null ?
                    0 : probeCounter.getValue() + buildCounter.getValue();
        }
    }

    private static class HashAggConsumptionCalculator extends ConsumptionCalculator {
        public HashAggConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getCpuByRows(Map<String, Counter> counterMap) {
            final Counter buildCounter = counterMap.get("BuildRows");
            return buildCounter == null ? 0 : buildCounter.getValue();
        }
    }

    private static class SortConsumptionCalculator extends ConsumptionCalculator {
        public SortConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getCpuByRows(Map<String, Counter> counterMap) {
            final Counter sortRowsCounter = counterMap.get("SortRows");
            return sortRowsCounter == null ? 0 : sortRowsCounter.getValue();
        }
    }

    private static class WindowsConsumptionCalculator extends ConsumptionCalculator {
        public WindowsConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getCpuByRows(Map<String, Counter> counterMap) {
            final Counter processRowsCounter = counterMap.get("ProcessRows");
            return processRowsCounter == null ? 0 : processRowsCounter.getValue();
        }
    }

    private static class UnionConsumptionCalculator extends ConsumptionCalculator {
        public UnionConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getCpuByRows(Map<String, Counter> counterMap) {
            final Counter materializeRowsCounter = counterMap.get("MaterializeRows");
            return materializeRowsCounter == null ? 0 : materializeRowsCounter.getValue();
        }
    }

    private static class ExchangeConsumptionCalculator extends ConsumptionCalculator {
        public ExchangeConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getCpuByRows(Map<String, Counter> counterMap) {
            final Counter mergeRowsCounter = counterMap.get("MergeRows");
            return mergeRowsCounter == null ? 0 : mergeRowsCounter.getValue();
        }
    }

    /** One report-profile request destined for a single backend BRPC address. */
    private static class Request {
        private final TNetworkAddress address;
        private final List<PUniqueId> instanceIds;

        public Request(TNetworkAddress address) {
            this.address = address;
            this.instanceIds = Lists.newArrayList();
        }

        public TNetworkAddress getAddress() {
            return address;
        }

        public List<PUniqueId> getInstanceIds() {
            return instanceIds;
        }

        public void addInstanceId(PUniqueId instanceId) {
            this.instanceIds.add(instanceId);
        }
    }
}
/**
 * Provides resource-consumption information (CPU rows processed, IO bytes read) for
 * currently-running queries: it asks the backends to report their runtime profiles,
 * waits briefly, then derives consumption from Counters in the coordinator-side
 * RuntimeProfile tree.
 */
class CurrentQueryInfoProvider {
    private static final Logger LOG = LogManager.getLogger(CurrentQueryInfoProvider.class);

    public CurrentQueryInfoProvider() {
    }

    /**
     * Firstly send request to trigger profile to report for the specified query and wait
     * a while, secondly read Counters from the coordinator's RuntimeProfile and return
     * the query's consumption.
     *
     * @param item the query to measure
     * @return aggregated CPU/IO consumption for the query
     * @throws AnalysisException if the report request cannot be sent or a backend fails
     */
    public Consumption getQueryConsumption(QueryStatisticsItem item) throws AnalysisException {
        triggerReportAndWait(item, getWaitingTimeForSingleQuery(), false);
        return new Consumption(item.getQueryProfile());
    }

    /**
     * Same as {@link #getQueryConsumption(QueryStatisticsItem)}, but this will cause the
     * backends to report ALL queries' profiles in one round.
     *
     * @param items the queries to measure
     * @return consumption keyed by query id
     * @throws AnalysisException if the report request cannot be sent or a backend fails
     */
    public Map<String, Consumption> getQueryConsumption(Collection<QueryStatisticsItem> items)
            throws AnalysisException {
        triggerReportAndWait(items, getWaitingTime(items.size()), true);
        final Map<String, Consumption> queryConsumptions = Maps.newHashMap();
        for (QueryStatisticsItem item : items) {
            queryConsumptions.put(item.getQueryId(), new Consumption(item.getQueryProfile()));
        }
        return queryConsumptions;
    }

    /**
     * Return the query's per-instance consumption, one entry per fragment instance.
     *
     * @param item the query to measure
     * @return consumption per fragment instance
     * @throws AnalysisException if the report request cannot be sent or a backend fails
     */
    public Collection<InstanceConsumption> getQueryInstanceConsumption(QueryStatisticsItem item)
            throws AnalysisException {
        triggerReportAndWait(item, getWaitingTimeForSingleQuery(), false);
        final Map<String, RuntimeProfile> instanceProfiles = collectInstanceProfile(item.getQueryProfile());
        final List<InstanceConsumption> instanceConsumptions = Lists.newArrayList();
        for (QueryStatisticsItem.FragmentInstanceInfo instanceInfo : item.getFragmentInstanceInfos()) {
            // Every known fragment instance is expected to have a reported profile by now.
            final RuntimeProfile instanceProfile =
                    instanceProfiles.get(DebugUtil.printId(instanceInfo.getInstanceId()));
            Preconditions.checkNotNull(instanceProfile);
            final InstanceConsumption consumption = new InstanceConsumption(
                    instanceInfo.getFragmentId(),
                    instanceInfo.getInstanceId(),
                    instanceInfo.getAddress(),
                    instanceProfile);
            instanceConsumptions.add(consumption);
        }
        return instanceConsumptions;
    }

    /**
     * Flattens the profile tree (query profile -> fragment profiles -> instance profiles)
     * into a map keyed by instance id.
     *
     * @param queryProfile the root query profile
     * @return instance profiles keyed by instance id
     */
    private Map<String, RuntimeProfile> collectInstanceProfile(RuntimeProfile queryProfile) {
        final Map<String, RuntimeProfile> instanceProfiles = Maps.newHashMap();
        for (RuntimeProfile fragmentProfile : queryProfile.getChildMap().values()) {
            for (Map.Entry<String, RuntimeProfile> entry : fragmentProfile.getChildMap().entrySet()) {
                // NOTE(review): parseInstanceId() is referenced here but its definition is not
                // visible in this excerpt; the profile key is presumably of the form
                // "Instance ${instance_id} (host=$host $port)" — confirm against the full file.
                // checkState asserts that each instance id occurs only once.
                Preconditions.checkState(instanceProfiles.put(parseInstanceId(entry.getKey()),
                        entry.getValue()) == null);
            }
        }
        return instanceProfiles;
    }

    /** Waiting time for a single query's report; see {@link #getWaitingTime(int)}. */
    private long getWaitingTimeForSingleQuery() {
        return getWaitingTime(1);
    }

    /**
     * Waiting time scales linearly with the number of queries and is capped.
     *
     * @param numOfQuery number of queries whose reports we wait for
     * @return waiting time, unit (ms)
     */
    private long getWaitingTime(int numOfQuery) {
        final int oneQueryWaitingTime = 100;     // ms granted per query
        final int allQueryMaxWaitingTime = 2000; // overall cap in ms
        final int waitingTime = numOfQuery * oneQueryWaitingTime;
        return waitingTime > allQueryMaxWaitingTime ? allQueryMaxWaitingTime : waitingTime;
    }

    /** Convenience overload wrapping a single query in a list. */
    private void triggerReportAndWait(QueryStatisticsItem item, long waitingTime, boolean allQuery)
            throws AnalysisException {
        final List<QueryStatisticsItem> items = Lists.newArrayList(item);
        triggerReportAndWait(items, waitingTime, allQuery);
    }

    /** Triggers the profile report and then sleeps to give backends time to respond. */
    private void triggerReportAndWait(Collection<QueryStatisticsItem> items, long waitingTime, boolean allQuery)
            throws AnalysisException {
        triggerProfileReport(items, allQuery);
        try {
            // NOTE(review): static Thread.sleep invoked through an instance reference, and
            // InterruptedException is swallowed without re-interrupting the current thread.
            Thread.currentThread().sleep(waitingTime);
        } catch (InterruptedException e) {
        }
    }

    /**
     * Send report profile request to every involved backend (grouped by brpc address).
     *
     * @param items    the queries involved
     * @param allQuery true: all queries' profiles will be reported;
     *                 false: only the specified queries' profiles will be reported
     * @throws AnalysisException if a brpc address cannot be resolved or a request fails
     */
    private void triggerProfileReport(Collection<QueryStatisticsItem> items, boolean allQuery)
            throws AnalysisException {
        final Map<TNetworkAddress, Request> requests = Maps.newHashMap();
        // Cache of BE address -> brpc address, to resolve each backend only once.
        final Map<TNetworkAddress, TNetworkAddress> brpcAddresses = Maps.newHashMap();
        for (QueryStatisticsItem item : items) {
            for (QueryStatisticsItem.FragmentInstanceInfo instanceInfo : item.getFragmentInstanceInfos()) {
                TNetworkAddress brpcNetAddress = brpcAddresses.get(instanceInfo.getAddress());
                if (brpcNetAddress == null) {
                    try {
                        brpcNetAddress = toBrpcHost(instanceInfo.getAddress());
                        brpcAddresses.put(instanceInfo.getAddress(), brpcNetAddress);
                    } catch (Exception e) {
                        LOG.warn(e.getMessage());
                        throw new AnalysisException(e.getMessage());
                    }
                }
                // One Request per backend, accumulating the instance ids to report.
                Request request = requests.get(brpcNetAddress);
                if (request == null) {
                    request = new Request(brpcNetAddress);
                    requests.put(brpcNetAddress, request);
                }
                // For the all-query variant the backend reports everything, so no ids are needed.
                if (!allQuery) {
                    final PUniqueId pUId = new PUniqueId(instanceInfo.getInstanceId());
                    request.addInstanceId(pUId);
                }
            }
        }
        recvResponse(sendRequest(requests));
    }

    /** Sends one async trigger-report RPC per backend and returns the pending futures. */
    private List<Pair<Request, Future<PTriggerProfileReportResult>>> sendRequest(
            Map<TNetworkAddress, Request> requests) throws AnalysisException {
        final List<Pair<Request, Future<PTriggerProfileReportResult>>> futures = Lists.newArrayList();
        for (TNetworkAddress address : requests.keySet()) {
            final Request request = requests.get(address);
            final PTriggerProfileReportRequest pbRequest =
                    new PTriggerProfileReportRequest(request.getInstanceIds());
            try {
                futures.add(Pair.create(request, BackendServiceProxy.getInstance().
                        triggerProfileReportAsync(address, pbRequest)));
            } catch (RpcException e) {
                throw new AnalysisException("Sending request fails for query's execution informations.");
            }
        }
        return futures;
    }

    /** Waits (max 2s each) for every trigger-report RPC and fails on any non-OK status. */
    private void recvResponse(List<Pair<Request, Future<PTriggerProfileReportResult>>> futures)
            throws AnalysisException {
        final String reasonPrefix = "Fail to receive result.";
        for (Pair<Request, Future<PTriggerProfileReportResult>> pair : futures) {
            try {
                final PTriggerProfileReportResult result = pair.second.get(2, TimeUnit.SECONDS);
                final TStatusCode code = TStatusCode.findByValue(result.status.code);
                if (code != TStatusCode.OK) {
                    String errMsg = "";
                    if (result.status.msgs != null && !result.status.msgs.isEmpty()) {
                        errMsg = result.status.msgs.get(0);
                    }
                    throw new AnalysisException(reasonPrefix + " backend:" + pair.first.getAddress()
                            + " reason:" + errMsg);
                }
            } catch (InterruptedException | ExecutionException | TimeoutException e) {
                LOG.warn(reasonPrefix + " reason:" + e.getCause());
                throw new AnalysisException(reasonPrefix);
            }
        }
    }

    /** Resolves a backend's heartbeat (be-port) address to its brpc address. */
    private TNetworkAddress toBrpcHost(TNetworkAddress host) throws AnalysisException {
        final Backend backend = Catalog.getCurrentSystemInfo().getBackendWithBePort(
                host.getHostname(), host.getPort());
        if (backend == null) {
            throw new AnalysisException(new StringBuilder("Backend ")
                    .append(host.getHostname())
                    .append(":")
                    .append(host.getPort())
                    .append(" does not exist")
                    .toString());
        }
        if (backend.getBrpcPort() < 0) {
            throw new AnalysisException("BRPC port is't exist.");
        }
        return new TNetworkAddress(backend.getHost(), backend.getBrpcPort());
    }

    /**
     * Aggregated consumption of one RuntimeProfile: sums Counters from every exec node
     * of the known types via one ConsumptionCalculator per node type.
     */
    public static class Consumption {
        private final static String OLAP_SCAN_NODE = "OLAP_SCAN_NODE";
        private final static String HASH_JOIN_NODE = "HASH_JOIN_NODE";
        private final static String HASH_AGGREGATION_NODE = "AGGREGATION_NODE";
        private final static String SORT_NODE = "SORT_NODE";
        private final static String ANALYTIC_EVAL_NODE = "ANALYTIC_EVAL_NODE";
        private final static String UNION_NODE = "UNION_NODE";
        private final static String EXCHANGE_NODE = "EXCHANGE_NODE";

        // One calculator per exec-node type found in the profile.
        protected final List<ConsumptionCalculator> calculators;

        public Consumption(RuntimeProfile profile) {
            this.calculators = Lists.newArrayList();
            init(profile);
        }

        /** Collects each node type's counter maps and registers the matching calculator. */
        private void init(RuntimeProfile profile) {
            final List<Map<String, Counter>> olapScanCounters = Lists.newArrayList();
            collectNodeProfileCounters(profile, olapScanCounters, OLAP_SCAN_NODE);
            calculators.add(new OlapScanNodeConsumptionCalculator(olapScanCounters));

            final List<Map<String, Counter>> hashJoinCounters = Lists.newArrayList();
            collectNodeProfileCounters(profile, hashJoinCounters, HASH_JOIN_NODE);
            calculators.add(new HashJoinConsumptionCalculator(hashJoinCounters));

            final List<Map<String, Counter>> hashAggCounters = Lists.newArrayList();
            collectNodeProfileCounters(profile, hashAggCounters, HASH_AGGREGATION_NODE);
            calculators.add(new HashAggConsumptionCalculator(hashAggCounters));

            final List<Map<String, Counter>> sortCounters = Lists.newArrayList();
            collectNodeProfileCounters(profile, sortCounters, SORT_NODE);
            calculators.add(new SortConsumptionCalculator(sortCounters));

            final List<Map<String, Counter>> windowsCounters = Lists.newArrayList();
            collectNodeProfileCounters(profile, windowsCounters, ANALYTIC_EVAL_NODE);
            calculators.add(new WindowsConsumptionCalculator(windowsCounters));

            final List<Map<String, Counter>> unionCounters = Lists.newArrayList();
            collectNodeProfileCounters(profile, unionCounters, UNION_NODE);
            calculators.add(new UnionConsumptionCalculator(unionCounters));

            final List<Map<String, Counter>> exchangeCounters = Lists.newArrayList();
            collectNodeProfileCounters(profile, exchangeCounters, EXCHANGE_NODE);
            calculators.add(new ExchangeConsumptionCalculator(exchangeCounters));
        }

        /** Recursively collects the counter maps of every child profile named {@code name}. */
        private void collectNodeProfileCounters(RuntimeProfile profile,
                List<Map<String, Counter>> counterMaps, String name) {
            for (Map.Entry<String, RuntimeProfile> entry : profile.getChildMap().entrySet()) {
                if (name.equals(parsePossibleExecNodeName(entry.getKey()))) {
                    counterMaps.add(entry.getValue().getCounterMap());
                }
                collectNodeProfileCounters(entry.getValue(), counterMaps, name);
            }
        }

        /**
         * ExecNode's RuntimeProfile name is "$node_type_name (id=?)".
         *
         * @param str a child-profile name
         * @return the node type name, or "" when the name does not match that shape
         */
        private String parsePossibleExecNodeName(String str) {
            final String[] elements = str.split(" ");
            if (elements.length == 2) {
                return elements[0];
            } else {
                return "";
            }
        }

        /** Sum of CPU consumption (row counts) over all calculators. */
        public long getTotalCpuConsumption() {
            long cpu = 0;
            for (ConsumptionCalculator consumption : calculators) {
                cpu += consumption.getCpu();
            }
            return cpu;
        }

        /** Sum of IO consumption (bytes) over all calculators. */
        public long getTotalIoConsumption() {
            long io = 0;
            for (ConsumptionCalculator consumption : calculators) {
                io += consumption.getIo();
            }
            return io;
        }
    }

    /** Consumption of a single fragment instance, tagged with its identity and location. */
    public static class InstanceConsumption extends Consumption {
        private final String fragmentId;
        private final TUniqueId instanceId;
        private final TNetworkAddress address;

        public InstanceConsumption(
                String fragmentId,
                TUniqueId instanceId,
                TNetworkAddress address,
                RuntimeProfile profile) {
            super(profile);
            this.fragmentId = fragmentId;
            this.instanceId = instanceId;
            this.address = address;
        }

        public String getFragmentId() {
            return fragmentId;
        }

        public TUniqueId getInstanceId() {
            return instanceId;
        }

        public TNetworkAddress getAddress() {
            return address;
        }
    }

    /**
     * Base calculator: sums per-node-type Counters into CPU (rows) and IO (bytes) totals.
     * Subclasses override the hooks for the counters relevant to their node type;
     * the defaults contribute 0.
     */
    private static abstract class ConsumptionCalculator {
        protected final List<Map<String, Counter>> counterMaps;

        public ConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            this.counterMaps = counterMaps;
        }

        public long getCpu() {
            long cpu = 0;
            for (Map<String, Counter> counters : counterMaps) {
                cpu += getCpuByRows(counters);
            }
            return cpu;
        }

        public long getIo() {
            long io = 0;
            for (Map<String, Counter> counters : counterMaps) {
                io += getIoByByte(counters);
            }
            return io;
        }

        /** CPU contribution of one node's counters, measured in rows; default 0. */
        protected long getCpuByRows(Map<String, Counter> counters) {
            return 0;
        }

        /** IO contribution of one node's counters, measured in bytes; default 0. */
        protected long getIoByByte(Map<String, Counter> counters) {
            return 0;
        }
    }

    /** OLAP scan nodes contribute IO: bytes read from compressed storage. */
    private static class OlapScanNodeConsumptionCalculator extends ConsumptionCalculator {
        public OlapScanNodeConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getIoByByte(Map<String, Counter> counters) {
            final Counter counter = counters.get("CompressedBytesRead");
            return counter == null ? 0 : counter.getValue();
        }
    }

    /** Hash joins contribute CPU: probe-side plus build-side rows. */
    private static class HashJoinConsumptionCalculator extends ConsumptionCalculator {
        public HashJoinConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getCpuByRows(Map<String, Counter> counters) {
            final Counter probeCounter = counters.get("ProbeRows");
            final Counter buildCounter = counters.get("BuildRows");
            return probeCounter == null || buildCounter == null ?
                    0 : probeCounter.getValue() + buildCounter.getValue();
        }
    }

    /** Hash aggregations contribute CPU: rows built into the hash table. */
    private static class HashAggConsumptionCalculator extends ConsumptionCalculator {
        public HashAggConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getCpuByRows(Map<String, Counter> counters) {
            final Counter buildCounter = counters.get("BuildRows");
            return buildCounter == null ? 0 : buildCounter.getValue();
        }
    }

    /** Sort nodes contribute CPU: rows sorted. */
    private static class SortConsumptionCalculator extends ConsumptionCalculator {
        public SortConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getCpuByRows(Map<String, Counter> counters) {
            final Counter sortRowsCounter = counters.get("SortRows");
            return sortRowsCounter == null ? 0 : sortRowsCounter.getValue();
        }
    }

    /** Analytic (window) nodes contribute CPU: rows processed. */
    private static class WindowsConsumptionCalculator extends ConsumptionCalculator {
        public WindowsConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getCpuByRows(Map<String, Counter> counters) {
            final Counter processRowsCounter = counters.get("ProcessRows");
            return processRowsCounter == null ? 0 : processRowsCounter.getValue();
        }
    }

    /** Union nodes contribute CPU: rows materialized. */
    private static class UnionConsumptionCalculator extends ConsumptionCalculator {
        public UnionConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getCpuByRows(Map<String, Counter> counters) {
            final Counter materializeRowsCounter = counters.get("MaterializeRows");
            return materializeRowsCounter == null ? 0 : materializeRowsCounter.getValue();
        }
    }

    /** Exchange nodes contribute CPU: rows merged. */
    private static class ExchangeConsumptionCalculator extends ConsumptionCalculator {
        public ExchangeConsumptionCalculator(List<Map<String, Counter>> counterMaps) {
            super(counterMaps);
        }

        @Override
        protected long getCpuByRows(Map<String, Counter> counters) {
            final Counter mergeRowsCounter = counters.get("MergeRows");
            return mergeRowsCounter == null ? 0 : mergeRowsCounter.getValue();
        }
    }

    /** Per-backend trigger request: target brpc address plus the instance ids to report. */
    private static class Request {
        private final TNetworkAddress address;
        private final List<PUniqueId> instanceIds;

        public Request(TNetworkAddress address) {
            this.address = address;
            this.instanceIds = Lists.newArrayList();
        }

        public TNetworkAddress getAddress() {
            return address;
        }

        public List<PUniqueId> getInstanceIds() {
            return instanceIds;
        }

        public void addInstanceId(PUniqueId instanceId) {
            this.instanceIds.add(instanceId);
        }
    }
}
`failAllowedFor()` may allow a single failure of proxy(host), config(host) and controller(host) nodes, but none of those node types support retiring.
/**
 * One maintenance pass: fail bad ready nodes, fail bad active nodes, and set
 * wantToRetire on at most one host per pass that has active hardware failure reports.
 * Finally publishes throttling metrics.
 */
protected void maintain() {
    int throttledNodeFailures = 0;

    // 1. Fail ready nodes, under the allocation lock so no allocation races the failing.
    try (Mutex lock = nodeRepository().lockAllocation()) {
        updateNodeLivenessEventsForReadyNodes();

        for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
            Node node = entry.getKey();
            if (throttle(node)) {
                throttledNodeFailures++;
                continue;
            }
            String reason = entry.getValue();
            nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason);
        }
    }

    // 2. Refresh down/up history from the service monitor, then fail active nodes
    //    that have been bad for too long (subject to type restrictions and throttling).
    updateNodeDownState();
    List<Node> activeNodes = nodeRepository().getNodes(Node.State.active);

    Set<Node> nodesWithFailureReason = new HashSet<>();
    for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason(activeNodes).entrySet()) {
        Node node = entry.getKey();
        // Track even the nodes we skip, so step 3 never retires a node we wanted to fail.
        nodesWithFailureReason.add(node);
        if (!failAllowedFor(node.type())) {
            continue;
        }
        if (throttle(node)) {
            throttledNodeFailures++;
            continue;
        }
        String reason = entry.getValue();
        failActive(node, reason);
    }

    // 3. Retire (recursively, with children) at most one host per pass that has
    //    hardware failure reports. Hosts already marked wantToRetire count against the cap.
    // NOTE(review): the failAllowedFor() filter can admit proxy/config/controller host
    // types, which may not support retiring — confirm intended candidate set.
    final long maxWantToRetireHosts = 1;
    List<Node> candidateNodes = activeNodes.stream()
            .filter(node -> failAllowedFor(node.type()))
            .filter(node -> !nodesWithFailureReason.contains(node))
            .filter(node -> node.parentHostname().isEmpty())  // hosts only, not children
            .collect(Collectors.toList());
    long currentWantToRetireHosts = candidateNodes.stream().filter(node -> node.status().wantToRetire()).count();
    for (int i = 0; i < candidateNodes.size() && currentWantToRetireHosts < maxWantToRetireHosts; ++i) {
        Node node = candidateNodes.get(i);
        List<String> reasons = reasonsToRetireActiveParentHost(node);
        if (reasons.size() > 0) {
            retireRecursively(node, reasons, activeNodes);
            ++currentWantToRetireHosts;
        }
    }

    // throttlingActiveMetric is a 0/1 indicator; throttledNodeFailuresMetric is the count.
    metric.set(throttlingActiveMetric, Math.min(1, throttledNodeFailures), null);
    metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
}
.filter(node -> failAllowedFor(node.type()))
/**
 * One maintenance pass: fail bad ready nodes, fail bad active nodes, and set
 * wantToRetire on at most one host per pass that has active hardware failure reports.
 * Finally publishes throttling metrics.
 */
protected void maintain() {
    int throttledNodeFailures = 0;

    // 1. Fail ready nodes, under the allocation lock so no allocation races the failing.
    try (Mutex lock = nodeRepository().lockAllocation()) {
        updateNodeLivenessEventsForReadyNodes();

        for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
            Node node = entry.getKey();
            if (throttle(node)) {
                throttledNodeFailures++;
                continue;
            }
            String reason = entry.getValue();
            nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason);
        }
    }

    // 2. Refresh down/up history from the service monitor, then fail active nodes
    //    that have been bad for too long (subject to type restrictions and throttling).
    updateNodeDownState();
    List<Node> activeNodes = nodeRepository().getNodes(Node.State.active);

    Set<Node> nodesWithFailureReason = new HashSet<>();
    for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason(activeNodes).entrySet()) {
        Node node = entry.getKey();
        // Track even the nodes we skip, so step 3 never retires a node we wanted to fail.
        nodesWithFailureReason.add(node);
        if (!failAllowedFor(node.type())) {
            continue;
        }
        if (throttle(node)) {
            throttledNodeFailures++;
            continue;
        }
        String reason = entry.getValue();
        failActive(node, reason);
    }

    // 3. Retire (recursively, with children) at most one host per pass that has hardware
    //    failure reports. Candidates are hosts (no parent) with no pending failure reason;
    //    sorting puts nodes already marked wantToRetire first, so an in-flight retirement
    //    blocks starting another one, as does an already-retired allocation.
    //    (Rewritten from a stream whose filter() had side effects and whose limit(1).count()
    //    result was discarded — the explicit loop makes the early exit obvious.)
    List<Node> retirementCandidates = activeNodes.stream()
            .filter(node -> failAllowedFor(node.type()))
            .filter(node -> !nodesWithFailureReason.contains(node))
            .filter(node -> node.parentHostname().isEmpty())
            .sorted(Comparator.comparing(node -> node.status().wantToRetire(), Comparator.reverseOrder()))
            .collect(Collectors.toList());
    for (Node node : retirementCandidates) {
        // A retirement already in progress uses up this pass's single retirement slot.
        if (node.status().wantToRetire()) break;
        if (node.allocation().map(a -> a.membership().retired()).orElse(false)) break;
        List<String> reasons = reasonsToRetireActiveParentHost(node);
        if (!reasons.isEmpty()) {
            retireRecursively(node, reasons, activeNodes);
            break;
        }
    }

    // throttlingActiveMetric is a 0/1 indicator; throttledNodeFailuresMetric is the count.
    metric.set(throttlingActiveMetric, Math.min(1, throttledNodeFailures), null);
    metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
}
/**
 * Maintains information in the node repo about when this node last responded to ping,
 * and fails nodes which have not responded within the downtime limit. Ready nodes that
 * stop requesting config, and nodes with hardware failure reports, are also failed or
 * (for hosts) retired. Failing is rate-limited by a {@link ThrottlePolicy}.
 */
class NodeFailer extends Maintainer {

    private static final Logger log = Logger.getLogger(NodeFailer.class.getName());

    // A ready node is considered dead only if it has missed config requests for this long.
    private static final Duration nodeRequestInterval = Duration.ofMinutes(10);

    /** Metric for number of nodes that we want to fail, but cannot due to throttling */
    public static final String throttledNodeFailuresMetric = "throttledNodeFailures";

    /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
    public static final String throttlingActiveMetric = "nodeFailThrottling";

    /** Provides information about the status of ready hosts */
    private final HostLivenessTracker hostLivenessTracker;

    /** Provides (more accurate) information about the status of active hosts */
    private final ServiceMonitor serviceMonitor;

    private final Deployer deployer;
    private final Duration downTimeLimit;
    private final Clock clock;
    private final Orchestrator orchestrator;
    // Used to grant a grace period for config requests right after this failer starts.
    private final Instant constructionTime;
    private final ThrottlePolicy throttlePolicy;
    private final Metric metric;
    private final ConfigserverConfig configserverConfig;

    public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
                      ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
                      Duration downTimeLimit, Clock clock, Orchestrator orchestrator,
                      ThrottlePolicy throttlePolicy, Metric metric,
                      JobControl jobControl, ConfigserverConfig configserverConfig) {
        // Check node status every half downtime limit, capped at 5 minutes.
        super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl);
        this.deployer = deployer;
        this.hostLivenessTracker = hostLivenessTracker;
        this.serviceMonitor = serviceMonitor;
        this.downTimeLimit = downTimeLimit;
        this.clock = clock;
        this.orchestrator = orchestrator;
        this.constructionTime = clock.instant();
        this.throttlePolicy = throttlePolicy;
        this.metric = metric;
        this.configserverConfig = configserverConfig;
    }

    // NOTE(review): the @Override below precedes a private method, which cannot override
    // anything and will not compile as written — the overridden maintain() method appears
    // to have been removed from this excerpt at this position.
    @Override

    /** Records a "requested" history event for each ready node that has pinged us since its last one. */
    private void updateNodeLivenessEventsForReadyNodes() {
        // Update node last request events through ZooKeeper to collect request to all config servers.
        // We do this here ("lazily") to avoid writing to zk for each config request.
        for (Node node : nodeRepository().getNodes(Node.State.ready)) {
            Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
            if ( ! lastLocalRequest.isPresent()) continue;

            if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) {
                History updatedHistory = node.history()
                        .with(new History.Event(History.Event.Type.requested, Agent.NodeFailer, lastLocalRequest.get()));
                nodeRepository().write(node.with(updatedHistory));
            }
        }
    }

    /** Returns ready nodes that should be failed, mapped to a human-readable reason. */
    private Map<Node, String> getReadyNodesByFailureReason() {
        Instant oldestAcceptableRequestTime =
                // Give a grace period if this failer was constructed recently (e.g. all config
                // servers were just down), otherwise require a request within the window.
                constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ?
                        Instant.EPOCH :
                        clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);

        Map<Node, String> nodesByFailureReason = new HashMap<>();
        for (Node node : nodeRepository().getNodes(Node.State.ready)) {
            if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
                nodesByFailureReason.put(node, "Not receiving config requests from node");
            } else if (node.status().hardwareFailureDescription().isPresent()) {
                nodesByFailureReason.put(node, "Node has hardware failure");
            } else if (node.status().hardwareDivergence().isPresent()) {
                nodesByFailureReason.put(node, "Node has hardware divergence");
            } else {
                // Also fail ready nodes whose (parent) host has failure reports.
                Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().getNode(parent)).orElse(node);
                List<String> failureReports = reasonsToRetireActiveParentHost(hostNode);
                if (failureReports.size() > 0) {
                    if (hostNode.equals(node)) {
                        nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
                    } else {
                        nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
                    }
                }
            }
        }
        return nodesByFailureReason;
    }

    /**
     * If the node is down (see {@link #badNode}), this records a "down" history event
     * for it unless one exists. Otherwise we remove any "down" history record.
     */
    private void updateNodeDownState() {
        Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream()
                .collect(Collectors.toMap(Node::hostname, node -> node));

        serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName()
                .forEach((hostName, serviceInstances) -> {
                    Node node = activeNodesByHostname.get(hostName.s());
                    if (node == null) return;

                    if (badNode(serviceInstances)) {
                        recordAsDown(node);
                    } else {
                        clearDownRecord(node);
                    }
                });
    }

    /** Returns active nodes that should be failed, mapped to a human-readable reason. */
    private Map<Node, String> getActiveNodesByFailureReason(List<Node> activeNodes) {
        Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
        Map<Node, String> nodesByFailureReason = new HashMap<>();
        for (Node node : activeNodes) {
            // Down past the limit, and the application has not been suspended deliberately.
            if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
                nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
            } else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) {
                nodesByFailureReason.put(node, "Node has hardware failure: " + node.status().hardwareFailureDescription().get());
            }
        }
        return nodesByFailureReason;
    }

    /** Returns the texts of any hardware-badness reports present on the given host node. */
    private static List<String> reasonsToRetireActiveParentHost(Node hostNode) {
        return Stream.of(
                "badTotalMemorySize",
                "badTotalDiskSize",
                "badDiskType",
                "badInterfaceSpeed",
                "badCpuCount"
        )
                .map(reportId -> baseReportToString(hostNode, reportId))
                .flatMap(Optional::stream)
                .collect(Collectors.toList());
    }

    /** The generated string is built from the report's ID, created time, and description only. */
    static Optional<String> baseReportToString(Node node, String reportId) {
        return node.reports().getReport(reportId).map(report ->
                reportId + " reported " + report.getCreatedTime() + ": " + report.getDescription());
    }

    /**
     * There are reasons why this node should be parked, and we'd like to do it through retiring,
     * including any child nodes. Children are retired first (with activeNodes == null to stop
     * the recursion from looking for grandchildren), then the node itself.
     */
    private void retireRecursively(Node node, List<String> reasons, List<Node> activeNodes) {
        if (activeNodes != null) {
            List<Node> childNodesToRetire = activeNodes.stream()
                    .filter(n -> n.parentHostname().equals(Optional.of(node.hostname())))
                    .collect(Collectors.toList());
            for (Node childNode : childNodesToRetire) {
                retireRecursively(childNode, reasons, null);
            }
        }

        if (node.status().wantToRetire()) return;
        retireActive(node.hostname(), node.allocation().get().owner(), reasons);
    }

    /**
     * Sets wantToRetire on the node under the owning application's lock, re-reading the
     * node and re-verifying state/ownership since it may have changed before we got the lock.
     */
    private void retireActive(String hostname, ApplicationId owner, List<String> reasons) {
        Duration lockWait = Duration.ofSeconds(10);
        try (Mutex lock = nodeRepository().lock(owner, lockWait)) {
            Optional<Node> node = nodeRepository().getNode(hostname);
            if (node.isEmpty()) return;
            if (node.get().state() != Node.State.active) return;
            if (!node.get().allocation().orElseThrow().owner().equals(owner)) return;
            if (node.get().status().wantToRetire()) return;

            log.info("Setting wantToRetire on " + node.get() + " due to these reports: " + reasons);
            nodeRepository().write(node.get().withWantToRetire(true, Agent.NodeFailer, clock.instant()));
        } catch (ApplicationLockException e) {
            // Lost a lock race — a later maintenance pass will retry.
            log.warning("Failed to get lock on " + owner + " within " + lockWait +
                    " to set wantToRetire, will retry later");
        }
    }

    /** Returns whether node has any kind of hardware issue */
    public static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) {
        if (node.status().hardwareFailureDescription().isPresent() || node.status().hardwareDivergence().isPresent()) {
            return true;
        }
        Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.getNode(parent)).orElse(node);
        return reasonsToRetireActiveParentHost(hostNode).size() > 0;
    }

    /** Docker hosts only make config requests when node-admin runs in a container. */
    private boolean expectConfigRequests(Node node) {
        return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer();
    }

    /** A node passes if it became ready after the instant, or has requested config since it. */
    private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
        return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
    }

    private boolean wasMadeReadyBefore(Node node, Instant instant) {
        return node.history().hasEventBefore(History.Event.Type.readied, instant);
    }

    private boolean hasRecordedRequestAfter(Node node, Instant instant) {
        return node.history().hasEventAfter(History.Event.Type.requested, instant);
    }

    /** Whether the owning application is deliberately allowed to be down by the orchestrator. */
    private boolean applicationSuspended(Node node) {
        try {
            return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
                    == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
        } catch (ApplicationIdNotFoundException e) {
            // Treat it as not suspended and allow to fail the node anyway
            return false;
        }
    }

    /** Whether the node itself is allowed to be down by the orchestrator. */
    private boolean nodeSuspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN;
        } catch (HostNameNotFoundException e) {
            // Treat it as not suspended
            return false;
        }
    }

    /**
     * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
     * unless the node is replaced.
     * However, nodes of other types are not replaced (because all of the type are used by a single application),
     * so we only allow one to be in failed at any point in time to protect against runaway failing.
     */
    private boolean failAllowedFor(NodeType nodeType) {
        if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true;
        return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
    }

    /**
     * Returns true if the node is considered bad: all monitored services services are down.
     * If a node remains bad for a long time, the NodeFailer will eventually try to fail the node.
     */
    public static boolean badNode(List<ServiceInstance> services) {
        Map<ServiceStatus, Long> countsByStatus = services.stream()
                .collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting()));
        return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L &&
                countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L;
    }

    /**
     * Record a node as down if not already recorded and returns the node in the new state.
     * This assumes the node is found in the node
     * repo and that the node is allocated. If we get here otherwise something is truly odd.
     */
    private Node recordAsDown(Node node) {
        if (node.history().event(History.Event.Type.down).isPresent()) return node; // already down: do nothing

        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); // re-get inside lock
            return nodeRepository().write(node.downAt(clock.instant()));
        }
    }

    /** Removes the "down" history record from a node, if present, under the owner's lock. */
    private void clearDownRecord(Node node) {
        if ( ! node.history().event(History.Event.Type.down).isPresent()) return;

        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); // re-get inside lock
            nodeRepository().write(node.up());
        }
    }

    /**
     * Called when a node should be moved to the failed state: Do that if it seems safe,
     * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
     * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
     *
     * @return whether node was successfully failed
     */
    private boolean failActive(Node node, String reason) {
        Optional<Deployment> deployment =
                deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
        if ( ! deployment.isPresent()) return false; // this will be done at another config server

        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            // If the host is failing, fail (or recursively fail) every tenant node on it first.
            boolean allTenantNodesFailedOutSuccessfully = true;
            String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
            for (Node failingTenantNode : nodeRepository().list().childrenOf(node)) {
                if (failingTenantNode.state() == Node.State.active) {
                    allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
                } else {
                    nodeRepository().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
                }
            }

            if (! allTenantNodesFailedOutSuccessfully) return false;
            node = nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason);
            try {
                deployment.get().activate();
                return true;
            } catch (RuntimeException e) {
                // Redeploy without the node failed — roll the node back to active so it keeps serving.
                nodeRepository().reactivate(node.hostname(), Agent.NodeFailer,
                        "Failed to redeploy after being failed by NodeFailer");
                log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
                        ", but redeploying without the node failed", e);
                return false;
            }
        }
    }

    /** Returns true if node failing should be throttled */
    private boolean throttle(Node node) {
        if (throttlePolicy == ThrottlePolicy.disabled) return false;
        Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
        List<Node> nodes = nodeRepository().getNodes();
        NodeList recentlyFailedNodes = nodes.stream()
                .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
                .collect(collectingAndThen(Collectors.toList(), NodeList::new));

        // Allow failing if we are below the policy's ceiling for the window...
        if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
        // ...and always allow failing a minimum number of hosts (parent-less nodes).
        if (!node.parentHostname().isPresent() &&
                recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;

        log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
                throttlePolicy.toHumanReadableString(nodes.size())));
        return true;
    }

    /** Rate-limit policy for node failing: a fraction of all nodes, with a minimum, per window. */
    public enum ThrottlePolicy {

        hosted(Duration.ofDays(1), 0.02, 2),
        disabled(Duration.ZERO, 0, 0);

        private final Duration throttleWindow;
        private final double fractionAllowedToFail;
        private final int minimumAllowedToFail;

        ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
            this.throttleWindow = throttleWindow;
            this.fractionAllowedToFail = fractionAllowedToFail;
            this.minimumAllowedToFail = minimumAllowedToFail;
        }

        public int allowedToFailOf(int totalNodes) {
            return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
        }

        public String toHumanReadableString(int totalNodes) {
            return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100,
                    allowedToFailOf(totalNodes), minimumAllowedToFail, throttleWindow);
        }
    }
}
class NodeFailer extends Maintainer { private static final Logger log = Logger.getLogger(NodeFailer.class.getName()); private static final Duration nodeRequestInterval = Duration.ofMinutes(10); /** Metric for number of nodes that we want to fail, but cannot due to throttling */ public static final String throttledNodeFailuresMetric = "throttledNodeFailures"; /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */ public static final String throttlingActiveMetric = "nodeFailThrottling"; /** Provides information about the status of ready hosts */ private final HostLivenessTracker hostLivenessTracker; /** Provides (more accurate) information about the status of active hosts */ private final ServiceMonitor serviceMonitor; private final Deployer deployer; private final Duration downTimeLimit; private final Clock clock; private final Orchestrator orchestrator; private final Instant constructionTime; private final ThrottlePolicy throttlePolicy; private final Metric metric; private final ConfigserverConfig configserverConfig; public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker, ServiceMonitor serviceMonitor, NodeRepository nodeRepository, Duration downTimeLimit, Clock clock, Orchestrator orchestrator, ThrottlePolicy throttlePolicy, Metric metric, JobControl jobControl, ConfigserverConfig configserverConfig) { super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl); this.deployer = deployer; this.hostLivenessTracker = hostLivenessTracker; this.serviceMonitor = serviceMonitor; this.downTimeLimit = downTimeLimit; this.clock = clock; this.orchestrator = orchestrator; this.constructionTime = clock.instant(); this.throttlePolicy = throttlePolicy; this.metric = metric; this.configserverConfig = configserverConfig; } @Override private void updateNodeLivenessEventsForReadyNodes() { for (Node node : nodeRepository().getNodes(Node.State.ready)) { Optional<Instant> lastLocalRequest = 
hostLivenessTracker.lastRequestFrom(node.hostname()); if ( ! lastLocalRequest.isPresent()) continue; if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) { History updatedHistory = node.history() .with(new History.Event(History.Event.Type.requested, Agent.NodeFailer, lastLocalRequest.get())); nodeRepository().write(node.with(updatedHistory)); } } } private Map<Node, String> getReadyNodesByFailureReason() { Instant oldestAcceptableRequestTime = constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ? Instant.EPOCH : clock.instant().minus(downTimeLimit).minus(nodeRequestInterval); Map<Node, String> nodesByFailureReason = new HashMap<>(); for (Node node : nodeRepository().getNodes(Node.State.ready)) { if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) { nodesByFailureReason.put(node, "Not receiving config requests from node"); } else if (node.status().hardwareFailureDescription().isPresent()) { nodesByFailureReason.put(node, "Node has hardware failure"); } else if (node.status().hardwareDivergence().isPresent()) { nodesByFailureReason.put(node, "Node has hardware divergence"); } else { Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().getNode(parent)).orElse(node); List<String> failureReports = reasonsToRetireActiveParentHost(hostNode); if (failureReports.size() > 0) { if (hostNode.equals(node)) { nodesByFailureReason.put(node, "Host has failure reports: " + failureReports); } else { nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports); } } } } return nodesByFailureReason; } /** * If the node is down (see {@link * Otherwise we remove any "down" history record. 
*/ private void updateNodeDownState() { Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream() .collect(Collectors.toMap(Node::hostname, node -> node)); serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName() .forEach((hostName, serviceInstances) -> { Node node = activeNodesByHostname.get(hostName.s()); if (node == null) return; if (badNode(serviceInstances)) { recordAsDown(node); } else { clearDownRecord(node); } }); } private Map<Node, String> getActiveNodesByFailureReason(List<Node> activeNodes) { Instant graceTimeEnd = clock.instant().minus(downTimeLimit); Map<Node, String> nodesByFailureReason = new HashMap<>(); for (Node node : activeNodes) { if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) { nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit); } else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) { nodesByFailureReason.put(node, "Node has hardware failure: " + node.status().hardwareFailureDescription().get()); } } return nodesByFailureReason; } private static List<String> reasonsToRetireActiveParentHost(Node hostNode) { return Stream.of( "badTotalMemorySize", "badTotalDiskSize", "badDiskType", "badInterfaceSpeed", "badCpuCount" ) .map(reportId -> baseReportToString(hostNode, reportId)) .flatMap(Optional::stream) .collect(Collectors.toList()); } /** The generated string is built from the report's ID, created time, and description only. */ static Optional<String> baseReportToString(Node node, String reportId) { return node.reports().getReport(reportId).map(report -> reportId + " reported " + report.getCreatedTime() + ": " + report.getDescription()); } /** * There are reasons why this node should be parked, and we'd like to do it through retiring, * including any child nodes. 
*/ private void retireRecursively(Node node, List<String> reasons, List<Node> activeNodes) { if (activeNodes != null) { List<Node> childNodesToRetire = activeNodes.stream() .filter(n -> n.parentHostname().equals(Optional.of(node.hostname()))) .collect(Collectors.toList()); for (Node childNode : childNodesToRetire) { retireRecursively(childNode, reasons, null); } } if (node.status().wantToRetire()) return; retireActive(node.hostname(), node.allocation().get().owner(), reasons); } private void retireActive(String hostname, ApplicationId owner, List<String> reasons) { Duration lockWait = Duration.ofSeconds(10); try (Mutex lock = nodeRepository().lock(owner, lockWait)) { Optional<Node> node = nodeRepository().getNode(hostname); if (node.isEmpty()) return; if (node.get().state() != Node.State.active) return; if (!node.get().allocation().orElseThrow().owner().equals(owner)) return; if (node.get().status().wantToRetire()) return; log.info("Setting wantToRetire on " + node.get() + " due to these reports: " + reasons); nodeRepository().write(node.get().withWantToRetire(true, Agent.NodeFailer, clock.instant())); } catch (ApplicationLockException e) { log.warning("Failed to get lock on " + owner + " within " + lockWait + " to set wantToRetire, will retry later"); } } /** Returns whether node has any kind of hardware issue */ public static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) { if (node.status().hardwareFailureDescription().isPresent() || node.status().hardwareDivergence().isPresent()) { return true; } Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.getNode(parent)).orElse(node); return reasonsToRetireActiveParentHost(hostNode).size() > 0; } private boolean expectConfigRequests(Node node) { return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer(); } private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) { return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, 
instant); } private boolean wasMadeReadyBefore(Node node, Instant instant) { return node.history().hasEventBefore(History.Event.Type.readied, instant); } private boolean hasRecordedRequestAfter(Node node, Instant instant) { return node.history().hasEventAfter(History.Event.Type.requested, instant); } private boolean applicationSuspended(Node node) { try { return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner()) == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (ApplicationIdNotFoundException e) { return false; } } private boolean nodeSuspended(Node node) { try { return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN; } catch (HostNameNotFoundException e) { return false; } } /** * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected * unless the node is replaced. * However, nodes of other types are not replaced (because all of the type are used by a single application), * so we only allow one to be in failed at any point in time to protect against runaway failing. */ private boolean failAllowedFor(NodeType nodeType) { if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true; return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0; } /** * Returns true if the node is considered bad: all monitored services services are down. * If a node remains bad for a long time, the NodeFailer will eventually try to fail the node. */ public static boolean badNode(List<ServiceInstance> services) { Map<ServiceStatus, Long> countsByStatus = services.stream() .collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting())); return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L && countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L; } /** * Record a node as down if not already recorded and returns the node in the new state. 
* This assumes the node is found in the node * repo and that the node is allocated. If we get here otherwise something is truly odd. */ private Node recordAsDown(Node node) { if (node.history().event(History.Event.Type.down).isPresent()) return node; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); return nodeRepository().write(node.downAt(clock.instant())); } } private void clearDownRecord(Node node) { if ( ! node.history().event(History.Event.Type.down).isPresent()) return; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); nodeRepository().write(node.up()); } } /** * Called when a node should be moved to the failed state: Do that if it seems safe, * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host). * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken. * * @return whether node was successfully failed */ private boolean failActive(Node node, String reason) { Optional<Deployment> deployment = deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30)); if ( ! deployment.isPresent()) return false; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { boolean allTenantNodesFailedOutSuccessfully = true; String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason; for (Node failingTenantNode : nodeRepository().list().childrenOf(node)) { if (failingTenantNode.state() == Node.State.active) { allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure); } else { nodeRepository().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure); } } if (! 
allTenantNodesFailedOutSuccessfully) return false; node = nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason); try { deployment.get().activate(); return true; } catch (RuntimeException e) { nodeRepository().reactivate(node.hostname(), Agent.NodeFailer, "Failed to redeploy after being failed by NodeFailer"); log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() + ", but redeploying without the node failed", e); return false; } } } /** Returns true if node failing should be throttled */ private boolean throttle(Node node) { if (throttlePolicy == ThrottlePolicy.disabled) return false; Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow); List<Node> nodes = nodeRepository().getNodes(); NodeList recentlyFailedNodes = nodes.stream() .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow)) .collect(collectingAndThen(Collectors.toList(), NodeList::new)); if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false; if (!node.parentHostname().isPresent() && recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false; log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(), throttlePolicy.toHumanReadableString(nodes.size()))); return true; } public enum ThrottlePolicy { hosted(Duration.ofDays(1), 0.02, 2), disabled(Duration.ZERO, 0, 0); private final Duration throttleWindow; private final double fractionAllowedToFail; private final int minimumAllowedToFail; ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) { this.throttleWindow = throttleWindow; this.fractionAllowedToFail = fractionAllowedToFail; this.minimumAllowedToFail = minimumAllowedToFail; } public int allowedToFailOf(int totalNodes) { return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail); } public String toHumanReadableString(int 
totalNodes) { return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100, allowedToFailOf(totalNodes), minimumAllowedToFail, throttleWindow); } } }
Looked at the code again, maybe the do... I thought I saw a configserver in AWS being stuck in retired for a few days, but gone now.
protected void maintain() { int throttledNodeFailures = 0; try (Mutex lock = nodeRepository().lockAllocation()) { updateNodeLivenessEventsForReadyNodes(); for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) { Node node = entry.getKey(); if (throttle(node)) { throttledNodeFailures++; continue; } String reason = entry.getValue(); nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason); } } updateNodeDownState(); List<Node> activeNodes = nodeRepository().getNodes(Node.State.active); Set<Node> nodesWithFailureReason = new HashSet<>(); for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason(activeNodes).entrySet()) { Node node = entry.getKey(); nodesWithFailureReason.add(node); if (!failAllowedFor(node.type())) { continue; } if (throttle(node)) { throttledNodeFailures++; continue; } String reason = entry.getValue(); failActive(node, reason); } final long maxWantToRetireHosts = 1; List<Node> candidateNodes = activeNodes.stream() .filter(node -> failAllowedFor(node.type())) .filter(node -> !nodesWithFailureReason.contains(node)) .filter(node -> node.parentHostname().isEmpty()) .collect(Collectors.toList()); long currentWantToRetireHosts = candidateNodes.stream().filter(node -> node.status().wantToRetire()).count(); for (int i = 0; i < candidateNodes.size() && currentWantToRetireHosts < maxWantToRetireHosts; ++i) { Node node = candidateNodes.get(i); List<String> reasons = reasonsToRetireActiveParentHost(node); if (reasons.size() > 0) { retireRecursively(node, reasons, activeNodes); ++currentWantToRetireHosts; } } metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null); metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null); }
.filter(node -> failAllowedFor(node.type()))
protected void maintain() { int throttledNodeFailures = 0; try (Mutex lock = nodeRepository().lockAllocation()) { updateNodeLivenessEventsForReadyNodes(); for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) { Node node = entry.getKey(); if (throttle(node)) { throttledNodeFailures++; continue; } String reason = entry.getValue(); nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason); } } updateNodeDownState(); List<Node> activeNodes = nodeRepository().getNodes(Node.State.active); Set<Node> nodesWithFailureReason = new HashSet<>(); for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason(activeNodes).entrySet()) { Node node = entry.getKey(); nodesWithFailureReason.add(node); if (!failAllowedFor(node.type())) { continue; } if (throttle(node)) { throttledNodeFailures++; continue; } String reason = entry.getValue(); failActive(node, reason); } activeNodes.stream() .filter(node -> failAllowedFor(node.type())) .filter(node -> !nodesWithFailureReason.contains(node)) .filter(node -> node.parentHostname().isEmpty()) .sorted(Comparator.comparing(node -> node.status().wantToRetire(), Comparator.reverseOrder())) .filter(node -> { if (node.status().wantToRetire()) return true; if (node.allocation().map(a -> a.membership().retired()).orElse(false)) return true; List<String> reasons = reasonsToRetireActiveParentHost(node); if (reasons.size() > 0) { retireRecursively(node, reasons, activeNodes); return true; } return false; }) .limit(1) .count(); metric.set(throttlingActiveMetric, Math.min( 1, throttledNodeFailures), null); metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null); }
class NodeFailer extends Maintainer { private static final Logger log = Logger.getLogger(NodeFailer.class.getName()); private static final Duration nodeRequestInterval = Duration.ofMinutes(10); /** Metric for number of nodes that we want to fail, but cannot due to throttling */ public static final String throttledNodeFailuresMetric = "throttledNodeFailures"; /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */ public static final String throttlingActiveMetric = "nodeFailThrottling"; /** Provides information about the status of ready hosts */ private final HostLivenessTracker hostLivenessTracker; /** Provides (more accurate) information about the status of active hosts */ private final ServiceMonitor serviceMonitor; private final Deployer deployer; private final Duration downTimeLimit; private final Clock clock; private final Orchestrator orchestrator; private final Instant constructionTime; private final ThrottlePolicy throttlePolicy; private final Metric metric; private final ConfigserverConfig configserverConfig; public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker, ServiceMonitor serviceMonitor, NodeRepository nodeRepository, Duration downTimeLimit, Clock clock, Orchestrator orchestrator, ThrottlePolicy throttlePolicy, Metric metric, JobControl jobControl, ConfigserverConfig configserverConfig) { super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl); this.deployer = deployer; this.hostLivenessTracker = hostLivenessTracker; this.serviceMonitor = serviceMonitor; this.downTimeLimit = downTimeLimit; this.clock = clock; this.orchestrator = orchestrator; this.constructionTime = clock.instant(); this.throttlePolicy = throttlePolicy; this.metric = metric; this.configserverConfig = configserverConfig; } @Override private void updateNodeLivenessEventsForReadyNodes() { for (Node node : nodeRepository().getNodes(Node.State.ready)) { Optional<Instant> lastLocalRequest = 
hostLivenessTracker.lastRequestFrom(node.hostname()); if ( ! lastLocalRequest.isPresent()) continue; if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) { History updatedHistory = node.history() .with(new History.Event(History.Event.Type.requested, Agent.NodeFailer, lastLocalRequest.get())); nodeRepository().write(node.with(updatedHistory)); } } } private Map<Node, String> getReadyNodesByFailureReason() { Instant oldestAcceptableRequestTime = constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ? Instant.EPOCH : clock.instant().minus(downTimeLimit).minus(nodeRequestInterval); Map<Node, String> nodesByFailureReason = new HashMap<>(); for (Node node : nodeRepository().getNodes(Node.State.ready)) { if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) { nodesByFailureReason.put(node, "Not receiving config requests from node"); } else if (node.status().hardwareFailureDescription().isPresent()) { nodesByFailureReason.put(node, "Node has hardware failure"); } else if (node.status().hardwareDivergence().isPresent()) { nodesByFailureReason.put(node, "Node has hardware divergence"); } else { Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().getNode(parent)).orElse(node); List<String> failureReports = reasonsToRetireActiveParentHost(hostNode); if (failureReports.size() > 0) { if (hostNode.equals(node)) { nodesByFailureReason.put(node, "Host has failure reports: " + failureReports); } else { nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports); } } } } return nodesByFailureReason; } /** * If the node is down (see {@link * Otherwise we remove any "down" history record. 
*/ private void updateNodeDownState() { Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream() .collect(Collectors.toMap(Node::hostname, node -> node)); serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName() .forEach((hostName, serviceInstances) -> { Node node = activeNodesByHostname.get(hostName.s()); if (node == null) return; if (badNode(serviceInstances)) { recordAsDown(node); } else { clearDownRecord(node); } }); } private Map<Node, String> getActiveNodesByFailureReason(List<Node> activeNodes) { Instant graceTimeEnd = clock.instant().minus(downTimeLimit); Map<Node, String> nodesByFailureReason = new HashMap<>(); for (Node node : activeNodes) { if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) { nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit); } else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) { nodesByFailureReason.put(node, "Node has hardware failure: " + node.status().hardwareFailureDescription().get()); } } return nodesByFailureReason; } private static List<String> reasonsToRetireActiveParentHost(Node hostNode) { return Stream.of( "badTotalMemorySize", "badTotalDiskSize", "badDiskType", "badInterfaceSpeed", "badCpuCount" ) .map(reportId -> baseReportToString(hostNode, reportId)) .flatMap(Optional::stream) .collect(Collectors.toList()); } /** The generated string is built from the report's ID, created time, and description only. */ static Optional<String> baseReportToString(Node node, String reportId) { return node.reports().getReport(reportId).map(report -> reportId + " reported " + report.getCreatedTime() + ": " + report.getDescription()); } /** * There are reasons why this node should be parked, and we'd like to do it through retiring, * including any child nodes. 
*/ private void retireRecursively(Node node, List<String> reasons, List<Node> activeNodes) { if (activeNodes != null) { List<Node> childNodesToRetire = activeNodes.stream() .filter(n -> n.parentHostname().equals(Optional.of(node.hostname()))) .collect(Collectors.toList()); for (Node childNode : childNodesToRetire) { retireRecursively(childNode, reasons, null); } } if (node.status().wantToRetire()) return; retireActive(node.hostname(), node.allocation().get().owner(), reasons); } private void retireActive(String hostname, ApplicationId owner, List<String> reasons) { Duration lockWait = Duration.ofSeconds(10); try (Mutex lock = nodeRepository().lock(owner, lockWait)) { Optional<Node> node = nodeRepository().getNode(hostname); if (node.isEmpty()) return; if (node.get().state() != Node.State.active) return; if (!node.get().allocation().orElseThrow().owner().equals(owner)) return; if (node.get().status().wantToRetire()) return; log.info("Setting wantToRetire on " + node.get() + " due to these reports: " + reasons); nodeRepository().write(node.get().withWantToRetire(true, Agent.NodeFailer, clock.instant())); } catch (ApplicationLockException e) { log.warning("Failed to get lock on " + owner + " within " + lockWait + " to set wantToRetire, will retry later"); } } /** Returns whether node has any kind of hardware issue */ public static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) { if (node.status().hardwareFailureDescription().isPresent() || node.status().hardwareDivergence().isPresent()) { return true; } Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.getNode(parent)).orElse(node); return reasonsToRetireActiveParentHost(hostNode).size() > 0; } private boolean expectConfigRequests(Node node) { return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer(); } private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) { return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, 
instant); } private boolean wasMadeReadyBefore(Node node, Instant instant) { return node.history().hasEventBefore(History.Event.Type.readied, instant); } private boolean hasRecordedRequestAfter(Node node, Instant instant) { return node.history().hasEventAfter(History.Event.Type.requested, instant); } private boolean applicationSuspended(Node node) { try { return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner()) == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (ApplicationIdNotFoundException e) { return false; } } private boolean nodeSuspended(Node node) { try { return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN; } catch (HostNameNotFoundException e) { return false; } } /** * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected * unless the node is replaced. * However, nodes of other types are not replaced (because all of the type are used by a single application), * so we only allow one to be in failed at any point in time to protect against runaway failing. */ private boolean failAllowedFor(NodeType nodeType) { if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true; return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0; } /** * Returns true if the node is considered bad: all monitored services services are down. * If a node remains bad for a long time, the NodeFailer will eventually try to fail the node. */ public static boolean badNode(List<ServiceInstance> services) { Map<ServiceStatus, Long> countsByStatus = services.stream() .collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting())); return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L && countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L; } /** * Record a node as down if not already recorded and returns the node in the new state. 
* This assumes the node is found in the node * repo and that the node is allocated. If we get here otherwise something is truly odd. */ private Node recordAsDown(Node node) { if (node.history().event(History.Event.Type.down).isPresent()) return node; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); return nodeRepository().write(node.downAt(clock.instant())); } } private void clearDownRecord(Node node) { if ( ! node.history().event(History.Event.Type.down).isPresent()) return; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); nodeRepository().write(node.up()); } } /** * Called when a node should be moved to the failed state: Do that if it seems safe, * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host). * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken. * * @return whether node was successfully failed */ private boolean failActive(Node node, String reason) { Optional<Deployment> deployment = deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30)); if ( ! deployment.isPresent()) return false; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { boolean allTenantNodesFailedOutSuccessfully = true; String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason; for (Node failingTenantNode : nodeRepository().list().childrenOf(node)) { if (failingTenantNode.state() == Node.State.active) { allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure); } else { nodeRepository().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure); } } if (! 
allTenantNodesFailedOutSuccessfully) return false; node = nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason); try { deployment.get().activate(); return true; } catch (RuntimeException e) { nodeRepository().reactivate(node.hostname(), Agent.NodeFailer, "Failed to redeploy after being failed by NodeFailer"); log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() + ", but redeploying without the node failed", e); return false; } } } /** Returns true if node failing should be throttled */ private boolean throttle(Node node) { if (throttlePolicy == ThrottlePolicy.disabled) return false; Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow); List<Node> nodes = nodeRepository().getNodes(); NodeList recentlyFailedNodes = nodes.stream() .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow)) .collect(collectingAndThen(Collectors.toList(), NodeList::new)); if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false; if (!node.parentHostname().isPresent() && recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false; log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(), throttlePolicy.toHumanReadableString(nodes.size()))); return true; } public enum ThrottlePolicy { hosted(Duration.ofDays(1), 0.02, 2), disabled(Duration.ZERO, 0, 0); private final Duration throttleWindow; private final double fractionAllowedToFail; private final int minimumAllowedToFail; ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) { this.throttleWindow = throttleWindow; this.fractionAllowedToFail = fractionAllowedToFail; this.minimumAllowedToFail = minimumAllowedToFail; } public int allowedToFailOf(int totalNodes) { return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail); } public String toHumanReadableString(int 
totalNodes) { return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100, allowedToFailOf(totalNodes), minimumAllowedToFail, throttleWindow); } } }
class NodeFailer extends Maintainer { private static final Logger log = Logger.getLogger(NodeFailer.class.getName()); private static final Duration nodeRequestInterval = Duration.ofMinutes(10); /** Metric for number of nodes that we want to fail, but cannot due to throttling */ public static final String throttledNodeFailuresMetric = "throttledNodeFailures"; /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */ public static final String throttlingActiveMetric = "nodeFailThrottling"; /** Provides information about the status of ready hosts */ private final HostLivenessTracker hostLivenessTracker; /** Provides (more accurate) information about the status of active hosts */ private final ServiceMonitor serviceMonitor; private final Deployer deployer; private final Duration downTimeLimit; private final Clock clock; private final Orchestrator orchestrator; private final Instant constructionTime; private final ThrottlePolicy throttlePolicy; private final Metric metric; private final ConfigserverConfig configserverConfig; public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker, ServiceMonitor serviceMonitor, NodeRepository nodeRepository, Duration downTimeLimit, Clock clock, Orchestrator orchestrator, ThrottlePolicy throttlePolicy, Metric metric, JobControl jobControl, ConfigserverConfig configserverConfig) { super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)), jobControl); this.deployer = deployer; this.hostLivenessTracker = hostLivenessTracker; this.serviceMonitor = serviceMonitor; this.downTimeLimit = downTimeLimit; this.clock = clock; this.orchestrator = orchestrator; this.constructionTime = clock.instant(); this.throttlePolicy = throttlePolicy; this.metric = metric; this.configserverConfig = configserverConfig; } @Override private void updateNodeLivenessEventsForReadyNodes() { for (Node node : nodeRepository().getNodes(Node.State.ready)) { Optional<Instant> lastLocalRequest = 
hostLivenessTracker.lastRequestFrom(node.hostname()); if ( ! lastLocalRequest.isPresent()) continue; if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) { History updatedHistory = node.history() .with(new History.Event(History.Event.Type.requested, Agent.NodeFailer, lastLocalRequest.get())); nodeRepository().write(node.with(updatedHistory)); } } } private Map<Node, String> getReadyNodesByFailureReason() { Instant oldestAcceptableRequestTime = constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ? Instant.EPOCH : clock.instant().minus(downTimeLimit).minus(nodeRequestInterval); Map<Node, String> nodesByFailureReason = new HashMap<>(); for (Node node : nodeRepository().getNodes(Node.State.ready)) { if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) { nodesByFailureReason.put(node, "Not receiving config requests from node"); } else if (node.status().hardwareFailureDescription().isPresent()) { nodesByFailureReason.put(node, "Node has hardware failure"); } else if (node.status().hardwareDivergence().isPresent()) { nodesByFailureReason.put(node, "Node has hardware divergence"); } else { Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().getNode(parent)).orElse(node); List<String> failureReports = reasonsToRetireActiveParentHost(hostNode); if (failureReports.size() > 0) { if (hostNode.equals(node)) { nodesByFailureReason.put(node, "Host has failure reports: " + failureReports); } else { nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports); } } } } return nodesByFailureReason; } /** * If the node is down (see {@link * Otherwise we remove any "down" history record. 
*/ private void updateNodeDownState() { Map<String, Node> activeNodesByHostname = nodeRepository().getNodes(Node.State.active).stream() .collect(Collectors.toMap(Node::hostname, node -> node)); serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName() .forEach((hostName, serviceInstances) -> { Node node = activeNodesByHostname.get(hostName.s()); if (node == null) return; if (badNode(serviceInstances)) { recordAsDown(node); } else { clearDownRecord(node); } }); } private Map<Node, String> getActiveNodesByFailureReason(List<Node> activeNodes) { Instant graceTimeEnd = clock.instant().minus(downTimeLimit); Map<Node, String> nodesByFailureReason = new HashMap<>(); for (Node node : activeNodes) { if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) { nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit); } else if (node.status().hardwareFailureDescription().isPresent() && nodeSuspended(node)) { nodesByFailureReason.put(node, "Node has hardware failure: " + node.status().hardwareFailureDescription().get()); } } return nodesByFailureReason; } private static List<String> reasonsToRetireActiveParentHost(Node hostNode) { return Stream.of( "badTotalMemorySize", "badTotalDiskSize", "badDiskType", "badInterfaceSpeed", "badCpuCount" ) .map(reportId -> baseReportToString(hostNode, reportId)) .flatMap(Optional::stream) .collect(Collectors.toList()); } /** The generated string is built from the report's ID, created time, and description only. */ static Optional<String> baseReportToString(Node node, String reportId) { return node.reports().getReport(reportId).map(report -> reportId + " reported " + report.getCreatedTime() + ": " + report.getDescription()); } /** * There are reasons why this node should be parked, and we'd like to do it through retiring, * including any child nodes. 
*/ private void retireRecursively(Node node, List<String> reasons, List<Node> activeNodes) { if (activeNodes != null) { List<Node> childNodesToRetire = activeNodes.stream() .filter(n -> n.parentHostname().equals(Optional.of(node.hostname()))) .collect(Collectors.toList()); for (Node childNode : childNodesToRetire) { retireRecursively(childNode, reasons, null); } } if (node.status().wantToRetire()) return; retireActive(node.hostname(), node.allocation().get().owner(), reasons); } private void retireActive(String hostname, ApplicationId owner, List<String> reasons) { Duration lockWait = Duration.ofSeconds(10); try (Mutex lock = nodeRepository().lock(owner, lockWait)) { Optional<Node> node = nodeRepository().getNode(hostname); if (node.isEmpty()) return; if (node.get().state() != Node.State.active) return; if (!node.get().allocation().orElseThrow().owner().equals(owner)) return; if (node.get().status().wantToRetire()) return; log.info("Setting wantToRetire on " + node.get() + " due to these reports: " + reasons); nodeRepository().write(node.get().withWantToRetire(true, Agent.NodeFailer, clock.instant())); } catch (ApplicationLockException e) { log.warning("Failed to get lock on " + owner + " within " + lockWait + " to set wantToRetire, will retry later"); } } /** Returns whether node has any kind of hardware issue */ public static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) { if (node.status().hardwareFailureDescription().isPresent() || node.status().hardwareDivergence().isPresent()) { return true; } Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.getNode(parent)).orElse(node); return reasonsToRetireActiveParentHost(hostNode).size() > 0; } private boolean expectConfigRequests(Node node) { return !node.type().isDockerHost() || configserverConfig.nodeAdminInContainer(); } private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) { return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, 
instant); } private boolean wasMadeReadyBefore(Node node, Instant instant) { return node.history().hasEventBefore(History.Event.Type.readied, instant); } private boolean hasRecordedRequestAfter(Node node, Instant instant) { return node.history().hasEventAfter(History.Event.Type.requested, instant); } private boolean applicationSuspended(Node node) { try { return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner()) == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (ApplicationIdNotFoundException e) { return false; } } private boolean nodeSuspended(Node node) { try { return orchestrator.getNodeStatus(new HostName(node.hostname())) == HostStatus.ALLOWED_TO_BE_DOWN; } catch (HostNameNotFoundException e) { return false; } } /** * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected * unless the node is replaced. * However, nodes of other types are not replaced (because all of the type are used by a single application), * so we only allow one to be in failed at any point in time to protect against runaway failing. */ private boolean failAllowedFor(NodeType nodeType) { if (nodeType == NodeType.tenant || nodeType == NodeType.host) return true; return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0; } /** * Returns true if the node is considered bad: all monitored services services are down. * If a node remains bad for a long time, the NodeFailer will eventually try to fail the node. */ public static boolean badNode(List<ServiceInstance> services) { Map<ServiceStatus, Long> countsByStatus = services.stream() .collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting())); return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L && countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L; } /** * Record a node as down if not already recorded and returns the node in the new state. 
* This assumes the node is found in the node * repo and that the node is allocated. If we get here otherwise something is truly odd. */ private Node recordAsDown(Node node) { if (node.history().event(History.Event.Type.down).isPresent()) return node; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); return nodeRepository().write(node.downAt(clock.instant())); } } private void clearDownRecord(Node node) { if ( ! node.history().event(History.Event.Type.down).isPresent()) return; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); nodeRepository().write(node.up()); } } /** * Called when a node should be moved to the failed state: Do that if it seems safe, * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host). * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken. * * @return whether node was successfully failed */ private boolean failActive(Node node, String reason) { Optional<Deployment> deployment = deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30)); if ( ! deployment.isPresent()) return false; try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) { boolean allTenantNodesFailedOutSuccessfully = true; String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason; for (Node failingTenantNode : nodeRepository().list().childrenOf(node)) { if (failingTenantNode.state() == Node.State.active) { allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure); } else { nodeRepository().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure); } } if (! 
allTenantNodesFailedOutSuccessfully) return false; node = nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason); try { deployment.get().activate(); return true; } catch (RuntimeException e) { nodeRepository().reactivate(node.hostname(), Agent.NodeFailer, "Failed to redeploy after being failed by NodeFailer"); log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() + ", but redeploying without the node failed", e); return false; } } } /** Returns true if node failing should be throttled */ private boolean throttle(Node node) { if (throttlePolicy == ThrottlePolicy.disabled) return false; Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow); List<Node> nodes = nodeRepository().getNodes(); NodeList recentlyFailedNodes = nodes.stream() .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow)) .collect(collectingAndThen(Collectors.toList(), NodeList::new)); if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false; if (!node.parentHostname().isPresent() && recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false; log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(), throttlePolicy.toHumanReadableString(nodes.size()))); return true; } public enum ThrottlePolicy { hosted(Duration.ofDays(1), 0.02, 2), disabled(Duration.ZERO, 0, 0); private final Duration throttleWindow; private final double fractionAllowedToFail; private final int minimumAllowedToFail; ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) { this.throttleWindow = throttleWindow; this.fractionAllowedToFail = fractionAllowedToFail; this.minimumAllowedToFail = minimumAllowedToFail; } public int allowedToFailOf(int totalNodes) { return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail); } public String toHumanReadableString(int 
totalNodes) { return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100, allowedToFailOf(totalNodes), minimumAllowedToFail, throttleWindow); } } }
This is for backwards compatibility? Won't this make it impossible for us to use VIP_STATUS_FILE safely?
public void testBootstrapWithVipStatusFile() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3"); DeployTester tester = new DeployTester(configserverConfig, provisioner); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_FILE); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_FILE); assertTrue(vipStatus.isInRotation()); bootstrap.start(); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); bootstrap.deconstruct(); }
assertTrue(vipStatus.isInRotation());
public void testBootstrapWithVipStatusFile() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3"); DeployTester tester = new DeployTester(configserverConfig, provisioner); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_FILE); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_FILE); assertTrue(vipStatus.isInRotation()); bootstrap.start(); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); bootstrap.deconstruct(); }
class ConfigServerBootstrapTest { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Test public void testBootstrap() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3"); DeployTester tester = new DeployTester(configserverConfig, provisioner); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); RpcServer rpcServer = createRpcServer(configserverConfig); provisioner.allocations().values().iterator().next().remove(0); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY); assertFalse(vipStatus.isInRotation()); bootstrap.start(); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); bootstrap.deconstruct(); assertEquals(StateMonitor.Status.down, bootstrap.status()); assertFalse(rpcServer.isRunning()); assertFalse(vipStatus.isInRotation()); } @Test @Test public void testBootstrapWhenRedeploymentFails() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); DeployTester tester = new DeployTester(configserverConfig); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); java.nio.file.Files.delete(Paths.get(configserverConfig.configServerDBDir()) .resolve("tenants/") 
.resolve(tester.tenant().getName().value()) .resolve("sessions/2/services.xml")); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY); assertFalse(vipStatus.isInRotation()); bootstrap.start(); assertEquals(StateMonitor.Status.initializing, bootstrap.status()); assertFalse(rpcServer.isRunning()); assertFalse(vipStatus.isInRotation()); bootstrap.deconstruct(); } @Test public void testBootstrapNonHostedOneConfigModel() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfigNonHosted(temporaryFolder); String vespaVersion = "1.2.3"; List<ModelFactory> modelFactories = Collections.singletonList(DeployTester.createModelFactory(Version.fromString(vespaVersion))); List<Host> hosts = createHosts(vespaVersion); InMemoryProvisioner provisioner = new InMemoryProvisioner(new Hosts(hosts), true); Curator curator = new MockCurator(); DeployTester tester = new DeployTester(modelFactories, configserverConfig, Clock.systemUTC(), new Zone(Environment.dev, RegionName.defaultName()), provisioner, curator); tester.deployApp("src/test/apps/app/", vespaVersion, Instant.now()); ApplicationId applicationId = tester.applicationId(); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); curator.set(Path.fromString("/config/v2/tenants/" + applicationId.tenant().value() + "/sessions/2/version"), Utf8.toBytes("1.2.2")); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, 
BOOTSTRAP_IN_SEPARATE_THREAD, VIP_STATUS_PROGRAMMATICALLY); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); } private void waitUntil(BooleanSupplier booleanSupplier, String messageIfWaitingFails) throws InterruptedException { Duration timeout = Duration.ofSeconds(60); Instant endTime = Instant.now().plus(timeout); while (Instant.now().isBefore(endTime)) { if (booleanSupplier.getAsBoolean()) return; Thread.sleep(10); } throw new RuntimeException(messageIfWaitingFails); } private MockRpc createRpcServer(ConfigserverConfig configserverConfig) throws IOException { return new MockRpc(configserverConfig.rpcport(), temporaryFolder.newFolder()); } private StateMonitor createStateMonitor() { return new StateMonitor(new HealthMonitorConfig(new HealthMonitorConfig.Builder().initialStatus("initializing")), new SystemTimer()); } private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder) throws IOException { return createConfigserverConfig(temporaryFolder, true); } private static ConfigserverConfig createConfigserverConfigNonHosted(TemporaryFolder temporaryFolder) throws IOException { return createConfigserverConfig(temporaryFolder, false); } private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder, boolean hosted) throws IOException { return new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(temporaryFolder.newFolder("serverdb").getAbsolutePath()) .configDefinitionsDir(temporaryFolder.newFolder("configdefinitions").getAbsolutePath()) .hostedVespa(hosted) .multitenant(hosted) .maxDurationOfBootstrap(2) /* seconds */ .sleepTimeWhenRedeployingFails(0)); /* seconds */ } private List<Host> createHosts(String vespaVersion) { return Arrays.asList(createHost("host1", vespaVersion), 
createHost("host2", vespaVersion), createHost("host3", vespaVersion)); } private Host createHost(String hostname, String version) { return new Host(hostname, Collections.emptyList(), Optional.empty(), Optional.of(com.yahoo.component.Version.fromString(version))); } private VipStatus createVipStatus(ConfigServerBootstrap.VipStatusMode vipStatusMode) throws IOException { return new VipStatus(new QrSearchersConfig.Builder().build(), new VipStatusConfig.Builder() .initiallyInRotation(vipStatusMode == VIP_STATUS_FILE) .statusfile(temporaryFolder.newFile().getAbsolutePath()) .accessdisk(vipStatusMode == VIP_STATUS_FILE) .build(), new ClustersStatus()); } public static class MockRpc extends com.yahoo.vespa.config.server.rpc.MockRpc { volatile boolean isRunning = false; MockRpc(int port, File tempDir) { super(port, tempDir); } @Override public void run() { isRunning = true; } @Override public void stop() { isRunning = false; } @Override public boolean isRunning() { return isRunning; } } }
class ConfigServerBootstrapTest { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Test public void testBootstrap() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3"); DeployTester tester = new DeployTester(configserverConfig, provisioner); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); RpcServer rpcServer = createRpcServer(configserverConfig); provisioner.allocations().values().iterator().next().remove(0); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY); assertFalse(vipStatus.isInRotation()); bootstrap.start(); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); bootstrap.deconstruct(); assertEquals(StateMonitor.Status.down, bootstrap.status()); assertFalse(rpcServer.isRunning()); assertFalse(vipStatus.isInRotation()); } @Test @Test public void testBootstrapWhenRedeploymentFails() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); DeployTester tester = new DeployTester(configserverConfig); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); java.nio.file.Files.delete(Paths.get(configserverConfig.configServerDBDir()) .resolve("tenants/") 
.resolve(tester.tenant().getName().value()) .resolve("sessions/2/services.xml")); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY); assertFalse(vipStatus.isInRotation()); bootstrap.start(); assertEquals(StateMonitor.Status.initializing, bootstrap.status()); assertFalse(rpcServer.isRunning()); assertFalse(vipStatus.isInRotation()); bootstrap.deconstruct(); } @Test public void testBootstrapNonHostedOneConfigModel() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfigNonHosted(temporaryFolder); String vespaVersion = "1.2.3"; List<ModelFactory> modelFactories = Collections.singletonList(DeployTester.createModelFactory(Version.fromString(vespaVersion))); List<Host> hosts = createHosts(vespaVersion); InMemoryProvisioner provisioner = new InMemoryProvisioner(new Hosts(hosts), true); Curator curator = new MockCurator(); DeployTester tester = new DeployTester(modelFactories, configserverConfig, Clock.systemUTC(), new Zone(Environment.dev, RegionName.defaultName()), provisioner, curator); tester.deployApp("src/test/apps/app/", vespaVersion, Instant.now()); ApplicationId applicationId = tester.applicationId(); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); curator.set(Path.fromString("/config/v2/tenants/" + applicationId.tenant().value() + "/sessions/2/version"), Utf8.toBytes("1.2.2")); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, 
BOOTSTRAP_IN_SEPARATE_THREAD, VIP_STATUS_PROGRAMMATICALLY); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); } private void waitUntil(BooleanSupplier booleanSupplier, String messageIfWaitingFails) throws InterruptedException { Duration timeout = Duration.ofSeconds(60); Instant endTime = Instant.now().plus(timeout); while (Instant.now().isBefore(endTime)) { if (booleanSupplier.getAsBoolean()) return; Thread.sleep(10); } throw new RuntimeException(messageIfWaitingFails); } private MockRpc createRpcServer(ConfigserverConfig configserverConfig) throws IOException { return new MockRpc(configserverConfig.rpcport(), temporaryFolder.newFolder()); } private StateMonitor createStateMonitor() { return new StateMonitor(new HealthMonitorConfig(new HealthMonitorConfig.Builder().initialStatus("initializing")), new SystemTimer()); } private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder) throws IOException { return createConfigserverConfig(temporaryFolder, true); } private static ConfigserverConfig createConfigserverConfigNonHosted(TemporaryFolder temporaryFolder) throws IOException { return createConfigserverConfig(temporaryFolder, false); } private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder, boolean hosted) throws IOException { return new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(temporaryFolder.newFolder("serverdb").getAbsolutePath()) .configDefinitionsDir(temporaryFolder.newFolder("configdefinitions").getAbsolutePath()) .hostedVespa(hosted) .multitenant(hosted) .maxDurationOfBootstrap(2) /* seconds */ .sleepTimeWhenRedeployingFails(0)); /* seconds */ } private List<Host> createHosts(String vespaVersion) { return Arrays.asList(createHost("host1", vespaVersion), 
createHost("host2", vespaVersion), createHost("host3", vespaVersion)); } private Host createHost(String hostname, String version) { return new Host(hostname, Collections.emptyList(), Optional.empty(), Optional.of(com.yahoo.component.Version.fromString(version))); } private VipStatus createVipStatus(ConfigServerBootstrap.VipStatusMode vipStatusMode) throws IOException { return new VipStatus(new QrSearchersConfig.Builder().build(), new VipStatusConfig.Builder() .initiallyInRotation(vipStatusMode == VIP_STATUS_FILE) .statusfile(temporaryFolder.newFile().getAbsolutePath()) .accessdisk(vipStatusMode == VIP_STATUS_FILE) .build(), new ClustersStatus()); } public static class MockRpc extends com.yahoo.vespa.config.server.rpc.MockRpc { volatile boolean isRunning = false; MockRpc(int port, File tempDir) { super(port, tempDir); } @Override public void run() { isRunning = true; } @Override public void stop() { isRunning = false; } @Override public boolean isRunning() { return isRunning; } } }
These should throw UnsupportedOperationException to ensure they're not called, and/or don't cause strange NPE later?
public String athenzDnsSuffix() { return null; }
return null;
public String athenzDnsSuffix() { throw new UnsupportedOperationException(); }
class StandaloneContainerModelContextProperties implements ModelContext.Properties { private final CloudConfigInstallVariables cloudConfigInstallVariables; StandaloneContainerModelContextProperties(CloudConfigInstallVariables cloudConfigInstallVariables) { this.cloudConfigInstallVariables = cloudConfigInstallVariables; } @Override public boolean multitenant() { return cloudConfigInstallVariables.multiTenant().orElse(Boolean.FALSE); } @Override public ApplicationId applicationId() { return ApplicationId.defaultId(); } @Override public List<ConfigServerSpec> configServerSpecs() { return null; } @Override public HostName loadBalancerName() { return null; } @Override public URI ztsUrl() { return null; } @Override @Override public boolean hostedVespa() { return cloudConfigInstallVariables.hostedVespa().orElse(Boolean.FALSE); } @Override public Zone zone() { return null; } @Override public Set<Rotation> rotations() { return null; } @Override public boolean isBootstrap() { return false; } @Override public boolean isFirstTimeDeployment() { return false; } @Override public boolean useDedicatedNodeForLogserver() { return false; } @Override public boolean useFdispatchByDefault() { return false; } @Override public boolean useAdaptiveDispatch() { return false; } }
class StandaloneContainerModelContextProperties implements ModelContext.Properties { private final CloudConfigInstallVariables cloudConfigInstallVariables; StandaloneContainerModelContextProperties(CloudConfigInstallVariables cloudConfigInstallVariables) { this.cloudConfigInstallVariables = cloudConfigInstallVariables; } @Override public boolean multitenant() { return cloudConfigInstallVariables.multiTenant().orElse(Boolean.FALSE); } @Override public ApplicationId applicationId() { return ApplicationId.defaultId(); } @Override public List<ConfigServerSpec> configServerSpecs() { throw new UnsupportedOperationException(); } @Override public HostName loadBalancerName() { throw new UnsupportedOperationException(); } @Override public URI ztsUrl() { throw new UnsupportedOperationException(); } @Override @Override public boolean hostedVespa() { return cloudConfigInstallVariables.hostedVespa().orElse(Boolean.FALSE); } @Override public Zone zone() { throw new UnsupportedOperationException(); } @Override public Set<Rotation> rotations() { throw new UnsupportedOperationException(); } @Override public boolean isBootstrap() { throw new UnsupportedOperationException(); } @Override public boolean isFirstTimeDeployment() { throw new UnsupportedOperationException(); } @Override public boolean useDedicatedNodeForLogserver() { throw new UnsupportedOperationException(); } @Override public boolean useFdispatchByDefault() { throw new UnsupportedOperationException(); } @Override public boolean useAdaptiveDispatch() { throw new UnsupportedOperationException(); } }
Good point
public String athenzDnsSuffix() { return null; }
return null;
public String athenzDnsSuffix() { throw new UnsupportedOperationException(); }
class StandaloneContainerModelContextProperties implements ModelContext.Properties { private final CloudConfigInstallVariables cloudConfigInstallVariables; StandaloneContainerModelContextProperties(CloudConfigInstallVariables cloudConfigInstallVariables) { this.cloudConfigInstallVariables = cloudConfigInstallVariables; } @Override public boolean multitenant() { return cloudConfigInstallVariables.multiTenant().orElse(Boolean.FALSE); } @Override public ApplicationId applicationId() { return ApplicationId.defaultId(); } @Override public List<ConfigServerSpec> configServerSpecs() { return null; } @Override public HostName loadBalancerName() { return null; } @Override public URI ztsUrl() { return null; } @Override @Override public boolean hostedVespa() { return cloudConfigInstallVariables.hostedVespa().orElse(Boolean.FALSE); } @Override public Zone zone() { return null; } @Override public Set<Rotation> rotations() { return null; } @Override public boolean isBootstrap() { return false; } @Override public boolean isFirstTimeDeployment() { return false; } @Override public boolean useDedicatedNodeForLogserver() { return false; } @Override public boolean useFdispatchByDefault() { return false; } @Override public boolean useAdaptiveDispatch() { return false; } }
class StandaloneContainerModelContextProperties implements ModelContext.Properties { private final CloudConfigInstallVariables cloudConfigInstallVariables; StandaloneContainerModelContextProperties(CloudConfigInstallVariables cloudConfigInstallVariables) { this.cloudConfigInstallVariables = cloudConfigInstallVariables; } @Override public boolean multitenant() { return cloudConfigInstallVariables.multiTenant().orElse(Boolean.FALSE); } @Override public ApplicationId applicationId() { return ApplicationId.defaultId(); } @Override public List<ConfigServerSpec> configServerSpecs() { throw new UnsupportedOperationException(); } @Override public HostName loadBalancerName() { throw new UnsupportedOperationException(); } @Override public URI ztsUrl() { throw new UnsupportedOperationException(); } @Override @Override public boolean hostedVespa() { return cloudConfigInstallVariables.hostedVespa().orElse(Boolean.FALSE); } @Override public Zone zone() { throw new UnsupportedOperationException(); } @Override public Set<Rotation> rotations() { throw new UnsupportedOperationException(); } @Override public boolean isBootstrap() { throw new UnsupportedOperationException(); } @Override public boolean isFirstTimeDeployment() { throw new UnsupportedOperationException(); } @Override public boolean useDedicatedNodeForLogserver() { throw new UnsupportedOperationException(); } @Override public boolean useFdispatchByDefault() { throw new UnsupportedOperationException(); } @Override public boolean useAdaptiveDispatch() { throw new UnsupportedOperationException(); } }
This is what VipStatus thinks about the status, if a ststus file is used the response is based on whether the file exists or not (see comment for the test).
public void testBootstrapWithVipStatusFile() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3"); DeployTester tester = new DeployTester(configserverConfig, provisioner); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_FILE); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_FILE); assertTrue(vipStatus.isInRotation()); bootstrap.start(); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); bootstrap.deconstruct(); }
assertTrue(vipStatus.isInRotation());
public void testBootstrapWithVipStatusFile() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3"); DeployTester tester = new DeployTester(configserverConfig, provisioner); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_FILE); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_FILE); assertTrue(vipStatus.isInRotation()); bootstrap.start(); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); bootstrap.deconstruct(); }
class ConfigServerBootstrapTest { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Test public void testBootstrap() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3"); DeployTester tester = new DeployTester(configserverConfig, provisioner); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); RpcServer rpcServer = createRpcServer(configserverConfig); provisioner.allocations().values().iterator().next().remove(0); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY); assertFalse(vipStatus.isInRotation()); bootstrap.start(); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); bootstrap.deconstruct(); assertEquals(StateMonitor.Status.down, bootstrap.status()); assertFalse(rpcServer.isRunning()); assertFalse(vipStatus.isInRotation()); } @Test @Test public void testBootstrapWhenRedeploymentFails() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); DeployTester tester = new DeployTester(configserverConfig); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); java.nio.file.Files.delete(Paths.get(configserverConfig.configServerDBDir()) .resolve("tenants/") 
.resolve(tester.tenant().getName().value()) .resolve("sessions/2/services.xml")); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY); assertFalse(vipStatus.isInRotation()); bootstrap.start(); assertEquals(StateMonitor.Status.initializing, bootstrap.status()); assertFalse(rpcServer.isRunning()); assertFalse(vipStatus.isInRotation()); bootstrap.deconstruct(); } @Test public void testBootstrapNonHostedOneConfigModel() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfigNonHosted(temporaryFolder); String vespaVersion = "1.2.3"; List<ModelFactory> modelFactories = Collections.singletonList(DeployTester.createModelFactory(Version.fromString(vespaVersion))); List<Host> hosts = createHosts(vespaVersion); InMemoryProvisioner provisioner = new InMemoryProvisioner(new Hosts(hosts), true); Curator curator = new MockCurator(); DeployTester tester = new DeployTester(modelFactories, configserverConfig, Clock.systemUTC(), new Zone(Environment.dev, RegionName.defaultName()), provisioner, curator); tester.deployApp("src/test/apps/app/", vespaVersion, Instant.now()); ApplicationId applicationId = tester.applicationId(); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); curator.set(Path.fromString("/config/v2/tenants/" + applicationId.tenant().value() + "/sessions/2/version"), Utf8.toBytes("1.2.2")); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, 
BOOTSTRAP_IN_SEPARATE_THREAD, VIP_STATUS_PROGRAMMATICALLY); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); } private void waitUntil(BooleanSupplier booleanSupplier, String messageIfWaitingFails) throws InterruptedException { Duration timeout = Duration.ofSeconds(60); Instant endTime = Instant.now().plus(timeout); while (Instant.now().isBefore(endTime)) { if (booleanSupplier.getAsBoolean()) return; Thread.sleep(10); } throw new RuntimeException(messageIfWaitingFails); } private MockRpc createRpcServer(ConfigserverConfig configserverConfig) throws IOException { return new MockRpc(configserverConfig.rpcport(), temporaryFolder.newFolder()); } private StateMonitor createStateMonitor() { return new StateMonitor(new HealthMonitorConfig(new HealthMonitorConfig.Builder().initialStatus("initializing")), new SystemTimer()); } private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder) throws IOException { return createConfigserverConfig(temporaryFolder, true); } private static ConfigserverConfig createConfigserverConfigNonHosted(TemporaryFolder temporaryFolder) throws IOException { return createConfigserverConfig(temporaryFolder, false); } private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder, boolean hosted) throws IOException { return new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(temporaryFolder.newFolder("serverdb").getAbsolutePath()) .configDefinitionsDir(temporaryFolder.newFolder("configdefinitions").getAbsolutePath()) .hostedVespa(hosted) .multitenant(hosted) .maxDurationOfBootstrap(2) /* seconds */ .sleepTimeWhenRedeployingFails(0)); /* seconds */ } private List<Host> createHosts(String vespaVersion) { return Arrays.asList(createHost("host1", vespaVersion), 
createHost("host2", vespaVersion), createHost("host3", vespaVersion)); } private Host createHost(String hostname, String version) { return new Host(hostname, Collections.emptyList(), Optional.empty(), Optional.of(com.yahoo.component.Version.fromString(version))); } private VipStatus createVipStatus(ConfigServerBootstrap.VipStatusMode vipStatusMode) throws IOException { return new VipStatus(new QrSearchersConfig.Builder().build(), new VipStatusConfig.Builder() .initiallyInRotation(vipStatusMode == VIP_STATUS_FILE) .statusfile(temporaryFolder.newFile().getAbsolutePath()) .accessdisk(vipStatusMode == VIP_STATUS_FILE) .build(), new ClustersStatus()); } public static class MockRpc extends com.yahoo.vespa.config.server.rpc.MockRpc { volatile boolean isRunning = false; MockRpc(int port, File tempDir) { super(port, tempDir); } @Override public void run() { isRunning = true; } @Override public void stop() { isRunning = false; } @Override public boolean isRunning() { return isRunning; } } }
class ConfigServerBootstrapTest { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Test public void testBootstrap() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3"); DeployTester tester = new DeployTester(configserverConfig, provisioner); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); RpcServer rpcServer = createRpcServer(configserverConfig); provisioner.allocations().values().iterator().next().remove(0); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY); assertFalse(vipStatus.isInRotation()); bootstrap.start(); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); bootstrap.deconstruct(); assertEquals(StateMonitor.Status.down, bootstrap.status()); assertFalse(rpcServer.isRunning()); assertFalse(vipStatus.isInRotation()); } @Test @Test public void testBootstrapWhenRedeploymentFails() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); DeployTester tester = new DeployTester(configserverConfig); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); java.nio.file.Files.delete(Paths.get(configserverConfig.configServerDBDir()) .resolve("tenants/") 
.resolve(tester.tenant().getName().value()) .resolve("sessions/2/services.xml")); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY); assertFalse(vipStatus.isInRotation()); bootstrap.start(); assertEquals(StateMonitor.Status.initializing, bootstrap.status()); assertFalse(rpcServer.isRunning()); assertFalse(vipStatus.isInRotation()); bootstrap.deconstruct(); } @Test public void testBootstrapNonHostedOneConfigModel() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfigNonHosted(temporaryFolder); String vespaVersion = "1.2.3"; List<ModelFactory> modelFactories = Collections.singletonList(DeployTester.createModelFactory(Version.fromString(vespaVersion))); List<Host> hosts = createHosts(vespaVersion); InMemoryProvisioner provisioner = new InMemoryProvisioner(new Hosts(hosts), true); Curator curator = new MockCurator(); DeployTester tester = new DeployTester(modelFactories, configserverConfig, Clock.systemUTC(), new Zone(Environment.dev, RegionName.defaultName()), provisioner, curator); tester.deployApp("src/test/apps/app/", vespaVersion, Instant.now()); ApplicationId applicationId = tester.applicationId(); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); curator.set(Path.fromString("/config/v2/tenants/" + applicationId.tenant().value() + "/sessions/2/version"), Utf8.toBytes("1.2.2")); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, 
BOOTSTRAP_IN_SEPARATE_THREAD, VIP_STATUS_PROGRAMMATICALLY); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); } private void waitUntil(BooleanSupplier booleanSupplier, String messageIfWaitingFails) throws InterruptedException { Duration timeout = Duration.ofSeconds(60); Instant endTime = Instant.now().plus(timeout); while (Instant.now().isBefore(endTime)) { if (booleanSupplier.getAsBoolean()) return; Thread.sleep(10); } throw new RuntimeException(messageIfWaitingFails); } private MockRpc createRpcServer(ConfigserverConfig configserverConfig) throws IOException { return new MockRpc(configserverConfig.rpcport(), temporaryFolder.newFolder()); } private StateMonitor createStateMonitor() { return new StateMonitor(new HealthMonitorConfig(new HealthMonitorConfig.Builder().initialStatus("initializing")), new SystemTimer()); } private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder) throws IOException { return createConfigserverConfig(temporaryFolder, true); } private static ConfigserverConfig createConfigserverConfigNonHosted(TemporaryFolder temporaryFolder) throws IOException { return createConfigserverConfig(temporaryFolder, false); } private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder, boolean hosted) throws IOException { return new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(temporaryFolder.newFolder("serverdb").getAbsolutePath()) .configDefinitionsDir(temporaryFolder.newFolder("configdefinitions").getAbsolutePath()) .hostedVespa(hosted) .multitenant(hosted) .maxDurationOfBootstrap(2) /* seconds */ .sleepTimeWhenRedeployingFails(0)); /* seconds */ } private List<Host> createHosts(String vespaVersion) { return Arrays.asList(createHost("host1", vespaVersion), 
createHost("host2", vespaVersion), createHost("host3", vespaVersion)); } private Host createHost(String hostname, String version) { return new Host(hostname, Collections.emptyList(), Optional.empty(), Optional.of(com.yahoo.component.Version.fromString(version))); } private VipStatus createVipStatus(ConfigServerBootstrap.VipStatusMode vipStatusMode) throws IOException { return new VipStatus(new QrSearchersConfig.Builder().build(), new VipStatusConfig.Builder() .initiallyInRotation(vipStatusMode == VIP_STATUS_FILE) .statusfile(temporaryFolder.newFile().getAbsolutePath()) .accessdisk(vipStatusMode == VIP_STATUS_FILE) .build(), new ClustersStatus()); } public static class MockRpc extends com.yahoo.vespa.config.server.rpc.MockRpc { volatile boolean isRunning = false; MockRpc(int port, File tempDir) { super(port, tempDir); } @Override public void run() { isRunning = true; } @Override public void stop() { isRunning = false; } @Override public boolean isRunning() { return isRunning; } } }
Got it, 👍
public void testBootstrapWithVipStatusFile() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3"); DeployTester tester = new DeployTester(configserverConfig, provisioner); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_FILE); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_FILE); assertTrue(vipStatus.isInRotation()); bootstrap.start(); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); bootstrap.deconstruct(); }
assertTrue(vipStatus.isInRotation());
public void testBootstrapWithVipStatusFile() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3"); DeployTester tester = new DeployTester(configserverConfig, provisioner); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_FILE); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_FILE); assertTrue(vipStatus.isInRotation()); bootstrap.start(); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); bootstrap.deconstruct(); }
class ConfigServerBootstrapTest { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Test public void testBootstrap() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3"); DeployTester tester = new DeployTester(configserverConfig, provisioner); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); RpcServer rpcServer = createRpcServer(configserverConfig); provisioner.allocations().values().iterator().next().remove(0); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY); assertFalse(vipStatus.isInRotation()); bootstrap.start(); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); bootstrap.deconstruct(); assertEquals(StateMonitor.Status.down, bootstrap.status()); assertFalse(rpcServer.isRunning()); assertFalse(vipStatus.isInRotation()); } @Test @Test public void testBootstrapWhenRedeploymentFails() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); DeployTester tester = new DeployTester(configserverConfig); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); java.nio.file.Files.delete(Paths.get(configserverConfig.configServerDBDir()) .resolve("tenants/") 
.resolve(tester.tenant().getName().value()) .resolve("sessions/2/services.xml")); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY); assertFalse(vipStatus.isInRotation()); bootstrap.start(); assertEquals(StateMonitor.Status.initializing, bootstrap.status()); assertFalse(rpcServer.isRunning()); assertFalse(vipStatus.isInRotation()); bootstrap.deconstruct(); } @Test public void testBootstrapNonHostedOneConfigModel() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfigNonHosted(temporaryFolder); String vespaVersion = "1.2.3"; List<ModelFactory> modelFactories = Collections.singletonList(DeployTester.createModelFactory(Version.fromString(vespaVersion))); List<Host> hosts = createHosts(vespaVersion); InMemoryProvisioner provisioner = new InMemoryProvisioner(new Hosts(hosts), true); Curator curator = new MockCurator(); DeployTester tester = new DeployTester(modelFactories, configserverConfig, Clock.systemUTC(), new Zone(Environment.dev, RegionName.defaultName()), provisioner, curator); tester.deployApp("src/test/apps/app/", vespaVersion, Instant.now()); ApplicationId applicationId = tester.applicationId(); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); curator.set(Path.fromString("/config/v2/tenants/" + applicationId.tenant().value() + "/sessions/2/version"), Utf8.toBytes("1.2.2")); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, 
BOOTSTRAP_IN_SEPARATE_THREAD, VIP_STATUS_PROGRAMMATICALLY); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); } private void waitUntil(BooleanSupplier booleanSupplier, String messageIfWaitingFails) throws InterruptedException { Duration timeout = Duration.ofSeconds(60); Instant endTime = Instant.now().plus(timeout); while (Instant.now().isBefore(endTime)) { if (booleanSupplier.getAsBoolean()) return; Thread.sleep(10); } throw new RuntimeException(messageIfWaitingFails); } private MockRpc createRpcServer(ConfigserverConfig configserverConfig) throws IOException { return new MockRpc(configserverConfig.rpcport(), temporaryFolder.newFolder()); } private StateMonitor createStateMonitor() { return new StateMonitor(new HealthMonitorConfig(new HealthMonitorConfig.Builder().initialStatus("initializing")), new SystemTimer()); } private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder) throws IOException { return createConfigserverConfig(temporaryFolder, true); } private static ConfigserverConfig createConfigserverConfigNonHosted(TemporaryFolder temporaryFolder) throws IOException { return createConfigserverConfig(temporaryFolder, false); } private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder, boolean hosted) throws IOException { return new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(temporaryFolder.newFolder("serverdb").getAbsolutePath()) .configDefinitionsDir(temporaryFolder.newFolder("configdefinitions").getAbsolutePath()) .hostedVespa(hosted) .multitenant(hosted) .maxDurationOfBootstrap(2) /* seconds */ .sleepTimeWhenRedeployingFails(0)); /* seconds */ } private List<Host> createHosts(String vespaVersion) { return Arrays.asList(createHost("host1", vespaVersion), 
createHost("host2", vespaVersion), createHost("host3", vespaVersion)); } private Host createHost(String hostname, String version) { return new Host(hostname, Collections.emptyList(), Optional.empty(), Optional.of(com.yahoo.component.Version.fromString(version))); } private VipStatus createVipStatus(ConfigServerBootstrap.VipStatusMode vipStatusMode) throws IOException { return new VipStatus(new QrSearchersConfig.Builder().build(), new VipStatusConfig.Builder() .initiallyInRotation(vipStatusMode == VIP_STATUS_FILE) .statusfile(temporaryFolder.newFile().getAbsolutePath()) .accessdisk(vipStatusMode == VIP_STATUS_FILE) .build(), new ClustersStatus()); } public static class MockRpc extends com.yahoo.vespa.config.server.rpc.MockRpc { volatile boolean isRunning = false; MockRpc(int port, File tempDir) { super(port, tempDir); } @Override public void run() { isRunning = true; } @Override public void stop() { isRunning = false; } @Override public boolean isRunning() { return isRunning; } } }
class ConfigServerBootstrapTest { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Test public void testBootstrap() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); InMemoryProvisioner provisioner = new InMemoryProvisioner(true, "host0", "host1", "host3"); DeployTester tester = new DeployTester(configserverConfig, provisioner); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); RpcServer rpcServer = createRpcServer(configserverConfig); provisioner.allocations().values().iterator().next().remove(0); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY); assertFalse(vipStatus.isInRotation()); bootstrap.start(); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); bootstrap.deconstruct(); assertEquals(StateMonitor.Status.down, bootstrap.status()); assertFalse(rpcServer.isRunning()); assertFalse(vipStatus.isInRotation()); } @Test @Test public void testBootstrapWhenRedeploymentFails() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfig(temporaryFolder); DeployTester tester = new DeployTester(configserverConfig); tester.deployApp("src/test/apps/hosted/"); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); java.nio.file.Files.delete(Paths.get(configserverConfig.configServerDBDir()) .resolve("tenants/") 
.resolve(tester.tenant().getName().value()) .resolve("sessions/2/services.xml")); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY); assertFalse(vipStatus.isInRotation()); bootstrap.start(); assertEquals(StateMonitor.Status.initializing, bootstrap.status()); assertFalse(rpcServer.isRunning()); assertFalse(vipStatus.isInRotation()); bootstrap.deconstruct(); } @Test public void testBootstrapNonHostedOneConfigModel() throws Exception { ConfigserverConfig configserverConfig = createConfigserverConfigNonHosted(temporaryFolder); String vespaVersion = "1.2.3"; List<ModelFactory> modelFactories = Collections.singletonList(DeployTester.createModelFactory(Version.fromString(vespaVersion))); List<Host> hosts = createHosts(vespaVersion); InMemoryProvisioner provisioner = new InMemoryProvisioner(new Hosts(hosts), true); Curator curator = new MockCurator(); DeployTester tester = new DeployTester(modelFactories, configserverConfig, Clock.systemUTC(), new Zone(Environment.dev, RegionName.defaultName()), provisioner, curator); tester.deployApp("src/test/apps/app/", vespaVersion, Instant.now()); ApplicationId applicationId = tester.applicationId(); File versionFile = temporaryFolder.newFile(); VersionState versionState = new VersionState(versionFile); assertTrue(versionState.isUpgraded()); curator.set(Path.fromString("/config/v2/tenants/" + applicationId.tenant().value() + "/sessions/2/version"), Utf8.toBytes("1.2.2")); RpcServer rpcServer = createRpcServer(configserverConfig); VipStatus vipStatus = createVipStatus(VIP_STATUS_PROGRAMMATICALLY); ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState, createStateMonitor(), vipStatus, 
BOOTSTRAP_IN_SEPARATE_THREAD, VIP_STATUS_PROGRAMMATICALLY); waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running"); waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'"); waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation"); } private void waitUntil(BooleanSupplier booleanSupplier, String messageIfWaitingFails) throws InterruptedException { Duration timeout = Duration.ofSeconds(60); Instant endTime = Instant.now().plus(timeout); while (Instant.now().isBefore(endTime)) { if (booleanSupplier.getAsBoolean()) return; Thread.sleep(10); } throw new RuntimeException(messageIfWaitingFails); } private MockRpc createRpcServer(ConfigserverConfig configserverConfig) throws IOException { return new MockRpc(configserverConfig.rpcport(), temporaryFolder.newFolder()); } private StateMonitor createStateMonitor() { return new StateMonitor(new HealthMonitorConfig(new HealthMonitorConfig.Builder().initialStatus("initializing")), new SystemTimer()); } private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder) throws IOException { return createConfigserverConfig(temporaryFolder, true); } private static ConfigserverConfig createConfigserverConfigNonHosted(TemporaryFolder temporaryFolder) throws IOException { return createConfigserverConfig(temporaryFolder, false); } private static ConfigserverConfig createConfigserverConfig(TemporaryFolder temporaryFolder, boolean hosted) throws IOException { return new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(temporaryFolder.newFolder("serverdb").getAbsolutePath()) .configDefinitionsDir(temporaryFolder.newFolder("configdefinitions").getAbsolutePath()) .hostedVespa(hosted) .multitenant(hosted) .maxDurationOfBootstrap(2) /* seconds */ .sleepTimeWhenRedeployingFails(0)); /* seconds */ } private List<Host> createHosts(String vespaVersion) { return Arrays.asList(createHost("host1", vespaVersion), 
createHost("host2", vespaVersion), createHost("host3", vespaVersion)); } private Host createHost(String hostname, String version) { return new Host(hostname, Collections.emptyList(), Optional.empty(), Optional.of(com.yahoo.component.Version.fromString(version))); } private VipStatus createVipStatus(ConfigServerBootstrap.VipStatusMode vipStatusMode) throws IOException { return new VipStatus(new QrSearchersConfig.Builder().build(), new VipStatusConfig.Builder() .initiallyInRotation(vipStatusMode == VIP_STATUS_FILE) .statusfile(temporaryFolder.newFile().getAbsolutePath()) .accessdisk(vipStatusMode == VIP_STATUS_FILE) .build(), new ClustersStatus()); } public static class MockRpc extends com.yahoo.vespa.config.server.rpc.MockRpc { volatile boolean isRunning = false; MockRpc(int port, File tempDir) { super(port, tempDir); } @Override public void run() { isRunning = true; } @Override public void stop() { isRunning = false; } @Override public boolean isRunning() { return isRunning; } } }
Even though the operator precedence is well defined here, I think it could be good to have some parentheses to be entirely explicit about the intention
/**
 * Returns the delay in seconds to wait before the given retry attempt,
 * using exponential back-off capped at 10 seconds.
 *
 * @param retry the retry attempt number; values of 1 or less yield no delay
 * @return the delay in seconds, in the range [0, 10]
 */
public double getRetryDelay(int retry) {
    long retryMultiplier = 0L;
    if (retry > 1) {
        // Cap the shift distance, not the shifted result: for a long, the shift
        // distance is taken mod 64, so "1L << (retry-1)" wraps around for large
        // retry counts and the old "Math.min(10000, …)" cap never saw it coming.
        // A cap of 20 bounds the multiplier at 2^20, which already saturates the
        // 10-second ceiling below for any realistic base delay.
        retryMultiplier = 1L << Math.min(20, retry - 1);
    }
    // Parenthesized to make the evaluation order explicit; US converts
    // microseconds to seconds.
    return Math.min(10.0, (retryMultiplier * baseDelayUS.get()) / US);
}
return Math.min(10.0, retryMultiplier*baseDelayUS.get()/US);
/**
 * Returns the delay in seconds to wait before the given retry attempt,
 * using exponential back-off capped at 10 seconds.
 *
 * @param retry the retry attempt number; values of 1 or less yield no delay
 * @return the delay in seconds, in the range [0, 10]
 */
public double getRetryDelay(int retry) {
    if (retry <= 1) return 0.0; // first attempt (and invalid counts) wait nothing
    // Shift distance is capped at 20, bounding the multiplier at 2^20 so it can
    // never wrap for large retry counts.
    long backoffMultiplier = 1L << Math.min(20, retry - 1);
    // US converts the stored microsecond base delay to seconds; the result is
    // saturated at 10 seconds.
    return Math.min(10.0, (backoffMultiplier * baseDelayUS.get()) / US);
}
class RetryTransientErrorsPolicy implements RetryPolicy { private static final double US = 1000000; private final AtomicBoolean enabled = new AtomicBoolean(true); private volatile AtomicLong baseDelayUS = new AtomicLong(1000); /** * Sets whether or not this policy should allow retries or not. * * @param enabled True to allow retries. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setEnabled(boolean enabled) { this.enabled.set(enabled); return this; } /** * Sets the base delay in seconds to wait between retries. This amount is multiplied by the retry number. * * @param baseDelay The time in seconds. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setBaseDelay(double baseDelay) { this.baseDelayUS.set((long)(baseDelay*US)); return this; } @Override public boolean canRetry(int errorCode) { return enabled.get() && errorCode < ErrorCode.FATAL_ERROR; } @Override }
class RetryTransientErrorsPolicy implements RetryPolicy { private static final double US = 1000000; private final AtomicBoolean enabled = new AtomicBoolean(true); private volatile AtomicLong baseDelayUS = new AtomicLong(1000); /** * Sets whether or not this policy should allow retries or not. * * @param enabled True to allow retries. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setEnabled(boolean enabled) { this.enabled.set(enabled); return this; } /** * Sets the base delay in seconds to wait between retries. This amount is multiplied by the retry number. * * @param baseDelay The time in seconds. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setBaseDelay(double baseDelay) { this.baseDelayUS.set((long)(baseDelay*US)); return this; } @Override public boolean canRetry(int errorCode) { return enabled.get() && errorCode < ErrorCode.FATAL_ERROR; } @Override }
Left shift bit count operand may overflow here with a sufficient number of retries, rolling the multiplier over to the minimum. Consider using `min` on the number of bits shifted by as well.
/**
 * Returns the delay in seconds to wait before the given retry attempt,
 * using exponential back-off capped at 10 seconds.
 *
 * @param retry the retry attempt number; values of 1 or less yield no delay
 * @return the delay in seconds, in the range [0, 10]
 */
public double getRetryDelay(int retry) {
    long retryMultiplier = 0L;
    if (retry > 1) {
        // Cap the shift distance, not the shifted result: for a long, the shift
        // distance is taken mod 64, so "1L << (retry-1)" wraps around for large
        // retry counts and the old "Math.min(10000, …)" cap never saw it coming.
        // A cap of 20 bounds the multiplier at 2^20, which already saturates the
        // 10-second ceiling below for any realistic base delay.
        retryMultiplier = 1L << Math.min(20, retry - 1);
    }
    // Parenthesized to make the evaluation order explicit; US converts
    // microseconds to seconds.
    return Math.min(10.0, (retryMultiplier * baseDelayUS.get()) / US);
}
retryMultiplier = Math.min(10000, 1L << (retry-1));
/**
 * Returns the delay in seconds to wait before the given retry attempt,
 * using exponential back-off capped at 10 seconds.
 *
 * @param retry the retry attempt number; values of 1 or less yield no delay
 * @return the delay in seconds, in the range [0, 10]
 */
public double getRetryDelay(int retry) {
    if (retry <= 1) return 0.0; // first attempt (and invalid counts) wait nothing
    // Shift distance is capped at 20, bounding the multiplier at 2^20 so it can
    // never wrap for large retry counts.
    long backoffMultiplier = 1L << Math.min(20, retry - 1);
    // US converts the stored microsecond base delay to seconds; the result is
    // saturated at 10 seconds.
    return Math.min(10.0, (backoffMultiplier * baseDelayUS.get()) / US);
}
class RetryTransientErrorsPolicy implements RetryPolicy { private static final double US = 1000000; private final AtomicBoolean enabled = new AtomicBoolean(true); private volatile AtomicLong baseDelayUS = new AtomicLong(1000); /** * Sets whether or not this policy should allow retries or not. * * @param enabled True to allow retries. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setEnabled(boolean enabled) { this.enabled.set(enabled); return this; } /** * Sets the base delay in seconds to wait between retries. This amount is multiplied by the retry number. * * @param baseDelay The time in seconds. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setBaseDelay(double baseDelay) { this.baseDelayUS.set((long)(baseDelay*US)); return this; } @Override public boolean canRetry(int errorCode) { return enabled.get() && errorCode < ErrorCode.FATAL_ERROR; } @Override }
class RetryTransientErrorsPolicy implements RetryPolicy { private static final double US = 1000000; private final AtomicBoolean enabled = new AtomicBoolean(true); private volatile AtomicLong baseDelayUS = new AtomicLong(1000); /** * Sets whether or not this policy should allow retries or not. * * @param enabled True to allow retries. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setEnabled(boolean enabled) { this.enabled.set(enabled); return this; } /** * Sets the base delay in seconds to wait between retries. This amount is multiplied by the retry number. * * @param baseDelay The time in seconds. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setBaseDelay(double baseDelay) { this.baseDelayUS.set((long)(baseDelay*US)); return this; } @Override public boolean canRetry(int errorCode) { return enabled.get() && errorCode < ErrorCode.FATAL_ERROR; } @Override }
Fixed.
/**
 * Returns the delay in seconds to wait before the given retry attempt,
 * using exponential back-off capped at 10 seconds.
 *
 * @param retry the retry attempt number; values of 1 or less yield no delay
 * @return the delay in seconds, in the range [0, 10]
 */
public double getRetryDelay(int retry) {
    long retryMultiplier = 0L;
    if (retry > 1) {
        // Cap the shift distance, not the shifted result: for a long, the shift
        // distance is taken mod 64, so "1L << (retry-1)" wraps around for large
        // retry counts and the old "Math.min(10000, …)" cap never saw it coming.
        // A cap of 20 bounds the multiplier at 2^20, which already saturates the
        // 10-second ceiling below for any realistic base delay.
        retryMultiplier = 1L << Math.min(20, retry - 1);
    }
    // Parenthesized to make the evaluation order explicit; US converts
    // microseconds to seconds.
    return Math.min(10.0, (retryMultiplier * baseDelayUS.get()) / US);
}
return Math.min(10.0, retryMultiplier*baseDelayUS.get()/US);
/**
 * Returns the delay in seconds to wait before the given retry attempt,
 * using exponential back-off capped at 10 seconds.
 *
 * @param retry the retry attempt number; values of 1 or less yield no delay
 * @return the delay in seconds, in the range [0, 10]
 */
public double getRetryDelay(int retry) {
    if (retry <= 1) return 0.0; // first attempt (and invalid counts) wait nothing
    // Shift distance is capped at 20, bounding the multiplier at 2^20 so it can
    // never wrap for large retry counts.
    long backoffMultiplier = 1L << Math.min(20, retry - 1);
    // US converts the stored microsecond base delay to seconds; the result is
    // saturated at 10 seconds.
    return Math.min(10.0, (backoffMultiplier * baseDelayUS.get()) / US);
}
class RetryTransientErrorsPolicy implements RetryPolicy { private static final double US = 1000000; private final AtomicBoolean enabled = new AtomicBoolean(true); private volatile AtomicLong baseDelayUS = new AtomicLong(1000); /** * Sets whether or not this policy should allow retries or not. * * @param enabled True to allow retries. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setEnabled(boolean enabled) { this.enabled.set(enabled); return this; } /** * Sets the base delay in seconds to wait between retries. This amount is multiplied by the retry number. * * @param baseDelay The time in seconds. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setBaseDelay(double baseDelay) { this.baseDelayUS.set((long)(baseDelay*US)); return this; } @Override public boolean canRetry(int errorCode) { return enabled.get() && errorCode < ErrorCode.FATAL_ERROR; } @Override }
class RetryTransientErrorsPolicy implements RetryPolicy { private static final double US = 1000000; private final AtomicBoolean enabled = new AtomicBoolean(true); private volatile AtomicLong baseDelayUS = new AtomicLong(1000); /** * Sets whether or not this policy should allow retries or not. * * @param enabled True to allow retries. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setEnabled(boolean enabled) { this.enabled.set(enabled); return this; } /** * Sets the base delay in seconds to wait between retries. This amount is multiplied by the retry number. * * @param baseDelay The time in seconds. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setBaseDelay(double baseDelay) { this.baseDelayUS.set((long)(baseDelay*US)); return this; } @Override public boolean canRetry(int errorCode) { return enabled.get() && errorCode < ErrorCode.FATAL_ERROR; } @Override }
Fixed
/**
 * Returns the delay in seconds to wait before the given retry attempt,
 * using exponential back-off capped at 10 seconds.
 *
 * @param retry the retry attempt number; values of 1 or less yield no delay
 * @return the delay in seconds, in the range [0, 10]
 */
public double getRetryDelay(int retry) {
    long retryMultiplier = 0L;
    if (retry > 1) {
        // Cap the shift distance, not the shifted result: for a long, the shift
        // distance is taken mod 64, so "1L << (retry-1)" wraps around for large
        // retry counts and the old "Math.min(10000, …)" cap never saw it coming.
        // A cap of 20 bounds the multiplier at 2^20, which already saturates the
        // 10-second ceiling below for any realistic base delay.
        retryMultiplier = 1L << Math.min(20, retry - 1);
    }
    // Parenthesized to make the evaluation order explicit; US converts
    // microseconds to seconds.
    return Math.min(10.0, (retryMultiplier * baseDelayUS.get()) / US);
}
retryMultiplier = Math.min(10000, 1L << (retry-1));
/**
 * Returns the delay in seconds to wait before the given retry attempt,
 * using exponential back-off capped at 10 seconds.
 *
 * @param retry the retry attempt number; values of 1 or less yield no delay
 * @return the delay in seconds, in the range [0, 10]
 */
public double getRetryDelay(int retry) {
    if (retry <= 1) return 0.0; // first attempt (and invalid counts) wait nothing
    // Shift distance is capped at 20, bounding the multiplier at 2^20 so it can
    // never wrap for large retry counts.
    long backoffMultiplier = 1L << Math.min(20, retry - 1);
    // US converts the stored microsecond base delay to seconds; the result is
    // saturated at 10 seconds.
    return Math.min(10.0, (backoffMultiplier * baseDelayUS.get()) / US);
}
class RetryTransientErrorsPolicy implements RetryPolicy { private static final double US = 1000000; private final AtomicBoolean enabled = new AtomicBoolean(true); private volatile AtomicLong baseDelayUS = new AtomicLong(1000); /** * Sets whether or not this policy should allow retries or not. * * @param enabled True to allow retries. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setEnabled(boolean enabled) { this.enabled.set(enabled); return this; } /** * Sets the base delay in seconds to wait between retries. This amount is multiplied by the retry number. * * @param baseDelay The time in seconds. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setBaseDelay(double baseDelay) { this.baseDelayUS.set((long)(baseDelay*US)); return this; } @Override public boolean canRetry(int errorCode) { return enabled.get() && errorCode < ErrorCode.FATAL_ERROR; } @Override }
class RetryTransientErrorsPolicy implements RetryPolicy { private static final double US = 1000000; private final AtomicBoolean enabled = new AtomicBoolean(true); private volatile AtomicLong baseDelayUS = new AtomicLong(1000); /** * Sets whether or not this policy should allow retries or not. * * @param enabled True to allow retries. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setEnabled(boolean enabled) { this.enabled.set(enabled); return this; } /** * Sets the base delay in seconds to wait between retries. This amount is multiplied by the retry number. * * @param baseDelay The time in seconds. * @return This, to allow chaining. */ public RetryTransientErrorsPolicy setBaseDelay(double baseDelay) { this.baseDelayUS.set((long)(baseDelay*US)); return this; } @Override public boolean canRetry(int errorCode) { return enabled.get() && errorCode < ErrorCode.FATAL_ERROR; } @Override }
`s/Failed reload/Failed to reload/` or `s/Failed reload/Failed reload of/`
/**
 * Reloads the TLS crypto material from the configured options file.
 * Any failure is logged rather than propagated, so a transient bad read
 * cannot kill the thread that schedules these reloads.
 */
public void run() {
    try {
        reloadCryptoMaterial(TransportSecurityOptions.fromJsonFile(tlsOptionsConfigFile), trustManager, keyManager);
    } catch (Throwable t) {
        // Message fixed: "Failed reload crypto material" -> "Failed to reload crypto material".
        log.log(Level.SEVERE,
                String.format("Failed to reload crypto material (path='%s'): %s", tlsOptionsConfigFile, t.getMessage()),
                t);
    }
}
log.log(Level.SEVERE, String.format("Failed reload crypto material (path='%s'): %s", tlsOptionsConfigFile, t.getMessage()), t);
// Reloads the TLS crypto material from the configured options file.
// Any failure is logged rather than propagated, so a transient bad read
// cannot kill the thread that schedules these reloads.
public void run() {
    try {
        reloadCryptoMaterial(TransportSecurityOptions.fromJsonFile(tlsOptionsConfigFile), trustManager, keyManager);
    } catch (Throwable t) {
        // Throwable (not just Exception) is caught deliberately: an escaping
        // error on the reloader thread would silently stop future reloads.
        log.log(Level.SEVERE, String.format("Failed to reload crypto material (path='%s'): %s", tlsOptionsConfigFile, t.getMessage()), t);
    }
}
class CryptoMaterialReloader implements Runnable { @Override }
class CryptoMaterialReloader implements Runnable { @Override }
These shouldn't be equal in any case, though ;)
/**
 * Verifies that creating a session from the currently active session yields a
 * new session id and a new generation, while carrying over the application
 * name and deploying user from the original session's metadata.
 */
public void createFromActiveSession() {
    PrepareResult result = deployApp(testApp);
    long sessionId = applicationRepository.createSessionFromExisting(applicationId(), new SilentDeployLogger(), false, timeoutBudget);
    long originalSessionId = result.sessionId();
    ApplicationMetaData originalApplicationMetaData = getApplicationMetaData(applicationId(), originalSessionId);
    ApplicationMetaData applicationMetaData = getApplicationMetaData(applicationId(), sessionId);

    assertNotEquals(sessionId, originalSessionId);
    assertEquals(applicationMetaData.getApplicationName(), originalApplicationMetaData.getApplicationName());
    assertEquals(applicationMetaData.getPreviousActiveGeneration(), originalApplicationMetaData.getGeneration().longValue());
    // Fixed: compare the two generations with each other. The old assertion
    // compared a generation against an application name, which can never be
    // equal, so it asserted nothing.
    assertNotEquals(applicationMetaData.getGeneration(), originalApplicationMetaData.getGeneration());
    assertEquals(applicationMetaData.getDeployedByUser(), originalApplicationMetaData.getDeployedByUser());
}
assertNotEquals(applicationMetaData.getGeneration(), originalApplicationMetaData.getApplicationName());
// Verifies that creating a session from the currently active session yields a
// new session id and a new generation, while carrying over the application
// name and deploying user from the original session's metadata.
// NOTE: the deploy must happen before createSessionFromExisting — the call
// order against the stateful applicationRepository is significant.
public void createFromActiveSession() {
    PrepareResult result = deployApp(testApp);
    long sessionId = applicationRepository.createSessionFromExisting(applicationId(), new SilentDeployLogger(), false, timeoutBudget);
    long originalSessionId = result.sessionId();
    ApplicationMetaData originalApplicationMetaData = getApplicationMetaData(applicationId(), originalSessionId);
    ApplicationMetaData applicationMetaData = getApplicationMetaData(applicationId(), sessionId);
    // A fresh session id is allocated for the copy.
    assertNotEquals(sessionId, originalSessionId);
    assertEquals(applicationMetaData.getApplicationName(), originalApplicationMetaData.getApplicationName());
    // The copy's "previous active generation" points back at the original's generation.
    assertEquals(applicationMetaData.getPreviousActiveGeneration(), originalApplicationMetaData.getGeneration().longValue());
    // Generations themselves must differ between original and copy.
    assertNotEquals(applicationMetaData.getGeneration(), originalApplicationMetaData.getGeneration());
    assertEquals(applicationMetaData.getDeployedByUser(), originalApplicationMetaData.getDeployedByUser());
}
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static File testAppLogServerWithContainer = new File("src/test/apps/app-logserver-with-container"); private final static TenantName tenant1 = TenantName.from("test1"); private final static TenantName tenant2 = TenantName.from("test2"); private final static TenantName tenant3 = TenantName.from("test3"); private final static Clock clock = Clock.systemUTC(); private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private SessionHandlerTest.MockProvisioner provisioner; private OrchestratorMock orchestrator; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenant1); tenantRepository.addTenant(tenant2); tenantRepository.addTenant(tenant3); orchestrator = new OrchestratorMock(); provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); 
assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test @Test public void testSuspension() { deployApp(testApp); assertFalse(applicationRepository.isSuspended(applicationId())); orchestrator.suspend(applicationId()); assertTrue(applicationRepository.isSuspended(applicationId())); } @Test public void getLogs() { WireMockServer wireMock = new WireMockServer(wireMockConfig().port(8080)); wireMock.start(); WireMock.configureFor("localhost", wireMock.port()); stubFor(get(urlEqualTo("/logs")) .willReturn(aResponse() .withStatus(200))); wireMock.start(); deployApp(testAppLogServerWithContainer); HttpResponse response = applicationRepository.getLogs(applicationId(), ""); assertEquals(200, response.getStatus()); wireMock.stop(); } @Test public void deleteUnusedTenants() { Instant now = ManualClock.at("1970-01-01T01:00:00"); deployApp(testApp); deployApp(testApp, new PrepareParams.Builder().applicationId(applicationId(tenant2)).build()); Duration ttlForUnusedTenant = Duration.ofHours(1); assertTrue(applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).isEmpty()); ttlForUnusedTenant = Duration.ofMillis(1); assertEquals(tenant3, applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).iterator().next()); applicationRepository.delete(applicationId()); Set<TenantName> tenantsDeleted = applicationRepository.deleteUnusedTenants(Duration.ofMillis(1), now); assertTrue(tenantsDeleted.contains(tenant1)); assertFalse(tenantsDeleted.contains(tenant2)); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", 
"routing", "default"); ApplicationId testerApp = ApplicationId.from("tenant1", "application1", "default-t"); Version sessionVersion = new Version(Vtag.currentVersion.getMajor(), 0); Version oldSessionVersion = Version.fromString("5.0"); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, sessionVersion, false)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, false)); assertEquals(oldSessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, oldSessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, true)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, sessionVersion, false)); } @Test public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = createFilereferenceOnDisk(new File(fileReferencesDir, "foo"), Instant.now().minus(Duration.ofDays(15))); File filereferenceDir2 = createFilereferenceOnDisk(new File(fileReferencesDir, "baz"), Instant.now()); tenantRepository.addTenant(tenant1); Provisioner provisioner = new 
SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); assertTrue(filereferenceDir2.exists()); } private File createFilereferenceOnDisk(File filereferenceDir, Instant lastModifiedTime) { assertTrue(filereferenceDir.mkdir()); File bar = new File(filereferenceDir, "file"); IOUtils.writeFile(bar, Utf8.toBytes("test")); assertTrue(filereferenceDir.setLastModified(lastModifiedTime.toEpochMilli())); return filereferenceDir; } @Test public void delete() { { PrepareResult result = deployApp(testApp); long sessionId = result.sessionId(); Tenant tenant = tenantRepository.getTenant(applicationId().tenant()); LocalSession applicationData = tenant.getLocalSessionRepo().getSession(sessionId); assertNotNull(applicationData); assertNotNull(applicationData.getApplicationId()); assertNotNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.delete(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertNull(tenant.getLocalSessionRepo().getSession(sessionId)); assertNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertTrue(provisioner.removed); assertThat(provisioner.lastApplicationId.tenant(), is(tenant.getName())); assertThat(provisioner.lastApplicationId, is(applicationId())); assertFalse(applicationRepository.delete(applicationId())); } { deployApp(testApp); 
assertTrue(applicationRepository.delete(applicationId())); deployApp(testApp); ApplicationId fooId = applicationId(tenant2); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); deployApp(testApp, prepareParams2); assertNotNull(applicationRepository.getActiveSession(fooId)); assertTrue(applicationRepository.delete(fooId)); assertThat(provisioner.lastApplicationId, is(fooId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.delete(applicationId())); } } @Test public void testDeletingInactiveSessions() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir().getAbsolutePath()) .configDefinitionsDir(Files.createTempDir().getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester(configserverConfig, clock); tester.deployApp("src/test/apps/app", clock.instant()); clock.advance(Duration.ofSeconds(10)); Optional<Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); LocalSession deployment3session = ((com.yahoo.vespa.config.server.deploy.Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId())); assertEquals(3, tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); tester.applicationRepository().deleteExpiredLocalSessions(); final 
Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(1, sessions.size()); assertEquals(3, new ArrayList<>(sessions).get(0).getSessionId()); assertEquals(0, applicationRepository.deleteExpiredRemoteSessions(Duration.ofSeconds(0))); } private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); ApplicationId applicationId = applicationId(); long sessionId = applicationRepository.createSession(applicationId, timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenantRepository.getTenant(applicationId.tenant()), sessionId, prepareParams(), false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenant1, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationId applicationId(TenantName tenantName) { return ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationMetaData getApplicationMetaData(ApplicationId applicationId, long sessionId) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); return applicationRepository.getMetadataFromSession(tenant, sessionId); } }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static File testAppLogServerWithContainer = new File("src/test/apps/app-logserver-with-container"); private final static TenantName tenant1 = TenantName.from("test1"); private final static TenantName tenant2 = TenantName.from("test2"); private final static TenantName tenant3 = TenantName.from("test3"); private final static Clock clock = Clock.systemUTC(); private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private SessionHandlerTest.MockProvisioner provisioner; private OrchestratorMock orchestrator; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenant1); tenantRepository.addTenant(tenant2); tenantRepository.addTenant(tenant3); orchestrator = new OrchestratorMock(); provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); 
assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test @Test public void testSuspension() { deployApp(testApp); assertFalse(applicationRepository.isSuspended(applicationId())); orchestrator.suspend(applicationId()); assertTrue(applicationRepository.isSuspended(applicationId())); } @Test public void getLogs() { WireMockServer wireMock = new WireMockServer(wireMockConfig().port(8080)); wireMock.start(); WireMock.configureFor("localhost", wireMock.port()); stubFor(get(urlEqualTo("/logs")) .willReturn(aResponse() .withStatus(200))); wireMock.start(); deployApp(testAppLogServerWithContainer); HttpResponse response = applicationRepository.getLogs(applicationId(), ""); assertEquals(200, response.getStatus()); wireMock.stop(); } @Test public void deleteUnusedTenants() { Instant now = ManualClock.at("1970-01-01T01:00:00"); deployApp(testApp); deployApp(testApp, new PrepareParams.Builder().applicationId(applicationId(tenant2)).build()); Duration ttlForUnusedTenant = Duration.ofHours(1); assertTrue(applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).isEmpty()); ttlForUnusedTenant = Duration.ofMillis(1); assertEquals(tenant3, applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).iterator().next()); applicationRepository.delete(applicationId()); Set<TenantName> tenantsDeleted = applicationRepository.deleteUnusedTenants(Duration.ofMillis(1), now); assertTrue(tenantsDeleted.contains(tenant1)); assertFalse(tenantsDeleted.contains(tenant2)); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", 
"routing", "default"); ApplicationId testerApp = ApplicationId.from("tenant1", "application1", "default-t"); Version sessionVersion = new Version(Vtag.currentVersion.getMajor(), 0); Version oldSessionVersion = Version.fromString("5.0"); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, sessionVersion, false)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, false)); assertEquals(oldSessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, oldSessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, true)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, sessionVersion, false)); } @Test public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = createFilereferenceOnDisk(new File(fileReferencesDir, "foo"), Instant.now().minus(Duration.ofDays(15))); File filereferenceDir2 = createFilereferenceOnDisk(new File(fileReferencesDir, "baz"), Instant.now()); tenantRepository.addTenant(tenant1); Provisioner provisioner = new 
SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); assertTrue(filereferenceDir2.exists()); } private File createFilereferenceOnDisk(File filereferenceDir, Instant lastModifiedTime) { assertTrue(filereferenceDir.mkdir()); File bar = new File(filereferenceDir, "file"); IOUtils.writeFile(bar, Utf8.toBytes("test")); assertTrue(filereferenceDir.setLastModified(lastModifiedTime.toEpochMilli())); return filereferenceDir; } @Test public void delete() { { PrepareResult result = deployApp(testApp); long sessionId = result.sessionId(); Tenant tenant = tenantRepository.getTenant(applicationId().tenant()); LocalSession applicationData = tenant.getLocalSessionRepo().getSession(sessionId); assertNotNull(applicationData); assertNotNull(applicationData.getApplicationId()); assertNotNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.delete(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertNull(tenant.getLocalSessionRepo().getSession(sessionId)); assertNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertTrue(provisioner.removed); assertThat(provisioner.lastApplicationId.tenant(), is(tenant.getName())); assertThat(provisioner.lastApplicationId, is(applicationId())); assertFalse(applicationRepository.delete(applicationId())); } { deployApp(testApp); 
assertTrue(applicationRepository.delete(applicationId())); deployApp(testApp); ApplicationId fooId = applicationId(tenant2); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); deployApp(testApp, prepareParams2); assertNotNull(applicationRepository.getActiveSession(fooId)); assertTrue(applicationRepository.delete(fooId)); assertThat(provisioner.lastApplicationId, is(fooId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.delete(applicationId())); } } @Test public void testDeletingInactiveSessions() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir().getAbsolutePath()) .configDefinitionsDir(Files.createTempDir().getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester(configserverConfig, clock); tester.deployApp("src/test/apps/app", clock.instant()); clock.advance(Duration.ofSeconds(10)); Optional<Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); LocalSession deployment3session = ((com.yahoo.vespa.config.server.deploy.Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId())); assertEquals(3, tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); tester.applicationRepository().deleteExpiredLocalSessions(); final 
Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(1, sessions.size()); assertEquals(3, new ArrayList<>(sessions).get(0).getSessionId()); assertEquals(0, applicationRepository.deleteExpiredRemoteSessions(Duration.ofSeconds(0))); } private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); ApplicationId applicationId = applicationId(); long sessionId = applicationRepository.createSession(applicationId, timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenantRepository.getTenant(applicationId.tenant()), sessionId, prepareParams(), false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenant1, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationId applicationId(TenantName tenantName) { return ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationMetaData getApplicationMetaData(ApplicationId applicationId, long sessionId) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); return applicationRepository.getMetadataFromSession(tenant, sessionId); } }
loose -> lose
// Asserts that the total number of nodes in state 'parked' equals the sum of
// the positive entries in nums; non-positive entries contribute nothing.
void assertParkedCountsByApplication(long... nums) {
    long expectedParked = 0L;
    for (long count : nums) {
        if (count > 0L) {
            expectedParked += count;
        }
    }
    assertEquals(expectedParked, (long) nodeRepository.getNodes(Node.State.parked).size());
}
// Verifies the total parked-node count: the expected value is the sum of the
// positive entries in nums (non-positive entries mean "no parked nodes expected").
void assertParkedCountsByApplication(long... nums) { long expected = LongStream.of(nums).filter(value -> value > 0L).sum(); long actual = (long) nodeRepository.getNodes(Node.State.parked).size(); assertEquals(expected, actual); }
/**
 * Test harness for NodeRetirer: wires a node repository backed by a mock curator
 * together with a mocked FlavorSpareChecker and Orchestrator and a MockDeployer,
 * and provides helpers for creating nodes, deploying applications, driving the
 * expirers and asserting on node counts per flavor / application.
 */
class NodeRetirerTester {

    public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));

    public final ManualClock clock = new ManualClock();
    public final NodeRepository nodeRepository;
    private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
    private final MockDeployer deployer;
    private final JobControl jobControl;
    private final List<Flavor> flavors;
    // LinkedHashMap: expectedCountsByApplication relies on deployment (insertion) order.
    private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
    private final Orchestrator orchestrator = mock(Orchestrator.class);

    // Expirers are created lazily in iterateMaintainers().
    private RetiredExpirer retiredExpirer;
    private InactiveExpirer inactiveExpirer;
    private int nextNodeId = 0;

    NodeRetirerTester(NodeFlavors nodeFlavors) {
        Curator curator = new MockCurator();
        nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
                new DockerImage("docker-registry.domain.tld:8080/dist/vespa"), true);
        jobControl = new JobControl(nodeRepository.database());
        NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone, new MockProvisionServiceProvider(), new InMemoryFlagSource());
        deployer = new MockDeployer(provisioner, clock, apps);
        // Sort flavors by name so tests can address them deterministically by index.
        flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());

        try {
            // Make the orchestrator always refuse removal, so retirement cannot complete through it.
            doThrow(new RuntimeException()).when(orchestrator).acquirePermissionToRemove(any());
        } catch (OrchestrationException e) {
            e.printStackTrace();
        }
    }

    NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
        return new NodeRetirer(nodeRepository, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
    }

    // Creates nums[i] nodes of the i'th flavor and moves them through dirty -> ready.
    void createReadyNodesByFlavor(int... nums) {
        List<Node> nodes = new ArrayList<>();
        for (int i = 0; i < nums.length; i++) {
            Flavor flavor = flavors.get(i);
            for (int j = 0; j < nums[i]; j++) {
                int id = nextNodeId++;
                nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
                        Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
            }
        }

        nodes = nodeRepository.addNodes(nodes);
        nodes = nodeRepository.setDirty(nodes, Agent.system, getClass().getSimpleName());
        nodeRepository.setReady(nodes, Agent.system, getClass().getSimpleName());
    }

    // Deploys an application with one container cluster per entry in flavorIds/numNodes and activates it.
    void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
        final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
        final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();

        for (int i = 0; i < flavorIds.length; i++) {
            Flavor flavor = flavors.get(flavorIds[i]);
            ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"), false, Collections.emptySet());
            Capacity capacity = Capacity.fromNodeCount(numNodes[i], Optional.of(flavor.name()), false, true);
            // Even node counts are split into two groups, odd counts stay in one.
            int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
            clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
        }

        apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
        deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
    }

    // Advances the clock past both expiry intervals and runs the retired and inactive expirers.
    void iterateMaintainers() {
        if (retiredExpirer == null) {
            retiredExpirer = new RetiredExpirer(nodeRepository, orchestrator, deployer, clock, Duration.ofDays(30), Duration.ofMinutes(10), jobControl);
            inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
        }

        clock.advance(Duration.ofMinutes(11));
        retiredExpirer.maintain();
        clock.advance(Duration.ofMinutes(11));
        inactiveExpirer.maintain();
    }

    // Stubs the spare checker to permit exactly numAllowed[i] unallocated retirements of flavor i,
    // then deny: the initial true plus (numAllowed[i]-1) trues in the array, followed by false.
    // Assumes every numAllowed[i] >= 1 (a zero would make the array index below go negative).
    void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
        for (int i = 0; i < numAllowed.length; i++) {
            Boolean[] responses = new Boolean[numAllowed[i]];
            Arrays.fill(responses, true);
            responses[responses.length - 1 ] = false;
            when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
        }
    }

    // Same as above, but for allocated retirements.
    void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
        for (int i = 0; i < numAllowed.length; i++) {
            Boolean[] responses = new Boolean[numAllowed[i]];
            Arrays.fill(responses, true);
            responses[responses.length - 1] = false;
            when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
        }
    }

    // Asserts the per-flavor node counts in the given state; a negative entry in nums skips that flavor.
    void assertCountsForStateByFlavor(Node.State state, long... nums) {
        Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
        Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
                .collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
        assertEquals(expected, actual);
    }

    // Asserts how many nodes per application are marked retiring (wantToRetire + retired membership)
    // but not yet parked; a negative entry in nums skips that application.
    void assertRetiringCountsByApplication(long... nums) {
        Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
        Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
                .filter(node -> node.status().wantToRetire())
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().membership().retired())
                .filter(node -> node.state() != Node.State.parked)
                .collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
        assertEquals(expected, actual);
    }

    private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
        Map<Flavor, Long> countsByFlavor = new HashMap<>();
        for (int i = 0; i < nums.length; i++) {
            if (nums[i] < 0) continue;  // negative means "don't check this flavor"
            Flavor flavor = flavors.get(i);
            countsByFlavor.put(flavor, nums[i]);
        }
        return countsByFlavor;
    }

    private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
        Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
        // Applications are matched to nums by deployment order (apps is a LinkedHashMap).
        Iterator<ApplicationId> iterator = apps.keySet().iterator();
        for (int i = 0; iterator.hasNext(); i++) {
            ApplicationId applicationId = iterator.next();
            if (nums[i] < 0) continue;  // negative means "don't check this application"
            countsByApplicationId.put(applicationId, nums[i]);
        }
        return countsByApplicationId;
    }

    // Builds numFlavors bare-metal flavors named "flavor-0" .. "flavor-(n-1)".
    static NodeFlavors makeFlavors(int numFlavors) {
        FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
        for (int i = 0; i < numFlavors; i++) {
            flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
        }
        return new NodeFlavors(flavorConfigBuilder.build());
    }

}
/**
 * Harness for exercising NodeRetirer in tests: sets up a mock-curator-backed node
 * repository, mocked FlavorSpareChecker/Orchestrator and a MockDeployer, plus helpers
 * to create ready nodes, deploy apps, run the expirers and assert node counts.
 */
class NodeRetirerTester {

    public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));

    public final ManualClock clock = new ManualClock();
    public final NodeRepository nodeRepository;
    private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
    private final MockDeployer deployer;
    private final JobControl jobControl;
    private final List<Flavor> flavors;
    // Insertion-ordered: count assertions address applications by deployment order.
    private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
    private final Orchestrator orchestrator = mock(Orchestrator.class);
    private RetiredExpirer retiredExpirer;     // lazily created in iterateMaintainers()
    private InactiveExpirer inactiveExpirer;   // lazily created in iterateMaintainers()
    private int nextNodeId = 0;

    NodeRetirerTester(NodeFlavors nodeFlavors) {
        Curator curator = new MockCurator();
        nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
                new DockerImage("docker-registry.domain.tld:8080/dist/vespa"), true);
        jobControl = new JobControl(nodeRepository.database());
        NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone, new MockProvisionServiceProvider(), new InMemoryFlagSource());
        deployer = new MockDeployer(provisioner, clock, apps);
        // Name-sorted so tests can refer to flavors by stable index.
        flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());

        try {
            // Orchestrator always denies removal permission in this harness.
            doThrow(new RuntimeException()).when(orchestrator).acquirePermissionToRemove(any());
        } catch (OrchestrationException e) {
            e.printStackTrace();
        }
    }

    NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
        return new NodeRetirer(nodeRepository, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy);
    }

    // Adds nums[i] nodes of flavor i and moves them dirty -> ready.
    void createReadyNodesByFlavor(int... nums) {
        List<Node> nodes = new ArrayList<>();
        for (int i = 0; i < nums.length; i++) {
            Flavor flavor = flavors.get(i);
            for (int j = 0; j < nums[i]; j++) {
                int id = nextNodeId++;
                nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
                        Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant));
            }
        }

        nodes = nodeRepository.addNodes(nodes);
        nodes = nodeRepository.setDirty(nodes, Agent.system, getClass().getSimpleName());
        nodeRepository.setReady(nodes, Agent.system, getClass().getSimpleName());
    }

    // Deploys and activates an application with one container cluster per flavorIds/numNodes entry.
    void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
        final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
        final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();

        for (int i = 0; i < flavorIds.length; i++) {
            Flavor flavor = flavors.get(flavorIds[i]);
            ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"), false, Collections.emptySet());
            Capacity capacity = Capacity.fromNodeCount(numNodes[i], Optional.of(flavor.name()), false, true);
            int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;  // even counts -> two groups
            clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
        }

        apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
        deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
    }

    // Advances past both expiry windows and runs the retired/inactive expirers once each.
    void iterateMaintainers() {
        if (retiredExpirer == null) {
            retiredExpirer = new RetiredExpirer(nodeRepository, orchestrator, deployer, clock, Duration.ofDays(30), Duration.ofMinutes(10), jobControl);
            inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl);
        }

        clock.advance(Duration.ofMinutes(11));
        retiredExpirer.maintain();
        clock.advance(Duration.ofMinutes(11));
        inactiveExpirer.maintain();
    }

    // Allows numAllowed[i] unallocated retirements of flavor i before the checker starts returning false.
    // Assumes each numAllowed[i] >= 1.
    void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
        for (int i = 0; i < numAllowed.length; i++) {
            Boolean[] responses = new Boolean[numAllowed[i]];
            Arrays.fill(responses, true);
            responses[responses.length - 1 ] = false;
            when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
        }
    }

    // Allows numAllowed[i] allocated retirements of flavor i before the checker starts returning false.
    void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
        for (int i = 0; i < numAllowed.length; i++) {
            Boolean[] responses = new Boolean[numAllowed[i]];
            Arrays.fill(responses, true);
            responses[responses.length - 1] = false;
            when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
        }
    }

    // Asserts per-flavor node counts in the given state; negative nums entries are skipped.
    void assertCountsForStateByFlavor(Node.State state, long... nums) {
        Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
        Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
                .collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
        assertEquals(expected, actual);
    }

    // Asserts per-application counts of retiring (wantToRetire + retired, not parked) nodes.
    void assertRetiringCountsByApplication(long... nums) {
        Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
        Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
                .filter(node -> node.status().wantToRetire())
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().membership().retired())
                .filter(node -> node.state() != Node.State.parked)
                .collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
        assertEquals(expected, actual);
    }

    private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
        Map<Flavor, Long> countsByFlavor = new HashMap<>();
        for (int i = 0; i < nums.length; i++) {
            if (nums[i] < 0) continue;  // negative entry: flavor not checked
            Flavor flavor = flavors.get(i);
            countsByFlavor.put(flavor, nums[i]);
        }
        return countsByFlavor;
    }

    private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
        Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
        Iterator<ApplicationId> iterator = apps.keySet().iterator();
        for (int i = 0; iterator.hasNext(); i++) {
            ApplicationId applicationId = iterator.next();
            if (nums[i] < 0) continue;  // negative entry: application not checked
            countsByApplicationId.put(applicationId, nums[i]);
        }
        return countsByApplicationId;
    }

    // Builds numFlavors bare-metal flavors named "flavor-0" .. "flavor-(numFlavors-1)".
    static NodeFlavors makeFlavors(int numFlavors) {
        FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
        for (int i = 0; i < numFlavors; i++) {
            flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
        }
        return new NodeFlavors(flavorConfigBuilder.build());
    }

}
Done
// Checks that the parked-node total matches the sum of the positive entries in nums.
// Clamping each entry to zero is equivalent to filtering out non-positive values.
void assertParkedCountsByApplication(long... nums) {
    long expectedTotal = LongStream.of(nums).map(n -> Math.max(n, 0L)).sum();
    long parkedCount = nodeRepository.getNodes(Node.State.parked).size();
    assertEquals(expectedTotal, parkedCount);
}
// Asserts that the number of parked nodes equals the sum of the positive entries
// in nums; entries <= 0 are ignored.
void assertParkedCountsByApplication(long... nums) { long expected = LongStream.of(nums).filter(value -> value > 0L).sum(); long actual = (long) nodeRepository.getNodes(Node.State.parked).size(); assertEquals(expected, actual); }
class NodeRetirerTester { public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east")); public final ManualClock clock = new ManualClock(); public final NodeRepository nodeRepository; private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class); private final MockDeployer deployer; private final JobControl jobControl; private final List<Flavor> flavors; private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>(); private final Orchestrator orchestrator = mock(Orchestrator.class); private RetiredExpirer retiredExpirer; private InactiveExpirer inactiveExpirer; private int nextNodeId = 0; NodeRetirerTester(NodeFlavors nodeFlavors) { Curator curator = new MockCurator(); nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(), new DockerImage("docker-registry.domain.tld:8080/dist/vespa"), true); jobControl = new JobControl(nodeRepository.database()); NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone, new MockProvisionServiceProvider(), new InMemoryFlagSource()); deployer = new MockDeployer(provisioner, clock, apps); flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList()); try { doThrow(new RuntimeException()).when(orchestrator).acquirePermissionToRemove(any()); } catch (OrchestrationException e) { e.printStackTrace(); } } NodeRetirer makeNodeRetirer(RetirementPolicy policy) { return new NodeRetirer(nodeRepository, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy); } void createReadyNodesByFlavor(int... 
nums) { List<Node> nodes = new ArrayList<>(); for (int i = 0; i < nums.length; i++) { Flavor flavor = flavors.get(i); for (int j = 0; j < nums[i]; j++) { int id = nextNodeId++; nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com", Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant)); } } nodes = nodeRepository.addNodes(nodes); nodes = nodeRepository.setDirty(nodes, Agent.system, getClass().getSimpleName()); nodeRepository.setReady(nodes, Agent.system, getClass().getSimpleName()); } void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) { final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default"); final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>(); for (int i = 0; i < flavorIds.length; i++) { Flavor flavor = flavors.get(flavorIds[i]); ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"), false, Collections.emptySet()); Capacity capacity = Capacity.fromNodeCount(numNodes[i], Optional.of(flavor.name()), false, true); int numGroups = numNodes[i] % 2 == 0 ? 2 : 1; clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups)); } apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts)); deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate(); } void iterateMaintainers() { if (retiredExpirer == null) { retiredExpirer = new RetiredExpirer(nodeRepository, orchestrator, deployer, clock, Duration.ofDays(30), Duration.ofMinutes(10), jobControl); inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl); } clock.advance(Duration.ofMinutes(11)); retiredExpirer.maintain(); clock.advance(Duration.ofMinutes(11)); inactiveExpirer.maintain(); } void setNumberAllowedUnallocatedRetirementsPerFlavor(int... 
numAllowed) { for (int i = 0; i < numAllowed.length; i++) { Boolean[] responses = new Boolean[numAllowed[i]]; Arrays.fill(responses, true); responses[responses.length - 1 ] = false; when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses); } } void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) { for (int i = 0; i < numAllowed.length; i++) { Boolean[] responses = new Boolean[numAllowed[i]]; Arrays.fill(responses, true); responses[responses.length - 1] = false; when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses); } } void assertCountsForStateByFlavor(Node.State state, long... nums) { Map<Flavor, Long> expected = expectedCountsByFlavor(nums); Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream() .collect(Collectors.groupingBy(Node::flavor, Collectors.counting())); assertEquals(expected, actual); } void assertRetiringCountsByApplication(long... nums) { Map<ApplicationId, Long> expected = expectedCountsByApplication(nums); Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream() .filter(node -> node.status().wantToRetire()) .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().membership().retired()) .filter(node -> node.state() != Node.State.parked) .collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting())); assertEquals(expected, actual); } private Map<Flavor, Long> expectedCountsByFlavor(long... nums) { Map<Flavor, Long> countsByFlavor = new HashMap<>(); for (int i = 0; i < nums.length; i++) { if (nums[i] < 0) continue; Flavor flavor = flavors.get(i); countsByFlavor.put(flavor, nums[i]); } return countsByFlavor; } private Map<ApplicationId, Long> expectedCountsByApplication(long... 
nums) { Map<ApplicationId, Long> countsByApplicationId = new HashMap<>(); Iterator<ApplicationId> iterator = apps.keySet().iterator(); for (int i = 0; iterator.hasNext(); i++) { ApplicationId applicationId = iterator.next(); if (nums[i] < 0) continue; countsByApplicationId.put(applicationId, nums[i]); } return countsByApplicationId; } static NodeFlavors makeFlavors(int numFlavors) { FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder(); for (int i = 0; i < numFlavors; i++) { flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL); } return new NodeFlavors(flavorConfigBuilder.build()); } }
class NodeRetirerTester { public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east")); public final ManualClock clock = new ManualClock(); public final NodeRepository nodeRepository; private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class); private final MockDeployer deployer; private final JobControl jobControl; private final List<Flavor> flavors; private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>(); private final Orchestrator orchestrator = mock(Orchestrator.class); private RetiredExpirer retiredExpirer; private InactiveExpirer inactiveExpirer; private int nextNodeId = 0; NodeRetirerTester(NodeFlavors nodeFlavors) { Curator curator = new MockCurator(); nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(), new DockerImage("docker-registry.domain.tld:8080/dist/vespa"), true); jobControl = new JobControl(nodeRepository.database()); NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone, new MockProvisionServiceProvider(), new InMemoryFlagSource()); deployer = new MockDeployer(provisioner, clock, apps); flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList()); try { doThrow(new RuntimeException()).when(orchestrator).acquirePermissionToRemove(any()); } catch (OrchestrationException e) { e.printStackTrace(); } } NodeRetirer makeNodeRetirer(RetirementPolicy policy) { return new NodeRetirer(nodeRepository, flavorSpareChecker, Duration.ofDays(1), deployer, jobControl, policy); } void createReadyNodesByFlavor(int... 
nums) { List<Node> nodes = new ArrayList<>(); for (int i = 0; i < nums.length; i++) { Flavor flavor = flavors.get(i); for (int j = 0; j < nums[i]; j++) { int id = nextNodeId++; nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com", Collections.singleton("::1"), Optional.empty(), flavor, NodeType.tenant)); } } nodes = nodeRepository.addNodes(nodes); nodes = nodeRepository.setDirty(nodes, Agent.system, getClass().getSimpleName()); nodeRepository.setReady(nodes, Agent.system, getClass().getSimpleName()); } void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) { final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default"); final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>(); for (int i = 0; i < flavorIds.length; i++) { Flavor flavor = flavors.get(flavorIds[i]); ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"), false, Collections.emptySet()); Capacity capacity = Capacity.fromNodeCount(numNodes[i], Optional.of(flavor.name()), false, true); int numGroups = numNodes[i] % 2 == 0 ? 2 : 1; clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups)); } apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts)); deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate(); } void iterateMaintainers() { if (retiredExpirer == null) { retiredExpirer = new RetiredExpirer(nodeRepository, orchestrator, deployer, clock, Duration.ofDays(30), Duration.ofMinutes(10), jobControl); inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10), jobControl); } clock.advance(Duration.ofMinutes(11)); retiredExpirer.maintain(); clock.advance(Duration.ofMinutes(11)); inactiveExpirer.maintain(); } void setNumberAllowedUnallocatedRetirementsPerFlavor(int... 
numAllowed) { for (int i = 0; i < numAllowed.length; i++) { Boolean[] responses = new Boolean[numAllowed[i]]; Arrays.fill(responses, true); responses[responses.length - 1 ] = false; when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses); } } void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) { for (int i = 0; i < numAllowed.length; i++) { Boolean[] responses = new Boolean[numAllowed[i]]; Arrays.fill(responses, true); responses[responses.length - 1] = false; when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses); } } void assertCountsForStateByFlavor(Node.State state, long... nums) { Map<Flavor, Long> expected = expectedCountsByFlavor(nums); Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream() .collect(Collectors.groupingBy(Node::flavor, Collectors.counting())); assertEquals(expected, actual); } void assertRetiringCountsByApplication(long... nums) { Map<ApplicationId, Long> expected = expectedCountsByApplication(nums); Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream() .filter(node -> node.status().wantToRetire()) .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().membership().retired()) .filter(node -> node.state() != Node.State.parked) .collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting())); assertEquals(expected, actual); } private Map<Flavor, Long> expectedCountsByFlavor(long... nums) { Map<Flavor, Long> countsByFlavor = new HashMap<>(); for (int i = 0; i < nums.length; i++) { if (nums[i] < 0) continue; Flavor flavor = flavors.get(i); countsByFlavor.put(flavor, nums[i]); } return countsByFlavor; } private Map<ApplicationId, Long> expectedCountsByApplication(long... 
nums) { Map<ApplicationId, Long> countsByApplicationId = new HashMap<>(); Iterator<ApplicationId> iterator = apps.keySet().iterator(); for (int i = 0; iterator.hasNext(); i++) { ApplicationId applicationId = iterator.next(); if (nums[i] < 0) continue; countsByApplicationId.put(applicationId, nums[i]); } return countsByApplicationId; } static NodeFlavors makeFlavors(int numFlavors) { FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder(); for (int i = 0; i < numFlavors; i++) { flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL); } return new NodeFlavors(flavorConfigBuilder.build()); } }
Consider making the comparator static to avoid creating one for each comparison :)
public int compareTo(Record that) { return Comparator.comparing(Record::type) .thenComparing(Record::name) .thenComparing(Record::data) .compare(this, that); }
return Comparator.comparing(Record::type)
public int compareTo(Record that) { return comparator.compare(this, that); }
class Record implements Comparable<Record> { private final Type type; private final RecordName name; private final RecordData data; public Record(Type type, RecordName name, RecordData data) { this.type = Objects.requireNonNull(type, "type cannot be null"); this.name = Objects.requireNonNull(name, "name cannot be null"); this.data = Objects.requireNonNull(data, "data cannot be null"); } /** DNS type of this */ public Type type() { return type; } /** Data in this, e.g. IP address for records of type A */ public RecordData data() { return data; } /** Name of this, e.g. a FQDN for records of type A */ public RecordName name() { return name; } public enum Type { A, AAAA, ALIAS, CNAME, MX, NS, PTR, SOA, SRV, TXT } @Override public String toString() { return String.format("%s %s -> %s", type, name, data); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Record record = (Record) o; return type == record.type && name.equals(record.name) && data.equals(record.data); } @Override public int hashCode() { return Objects.hash(type, name, data); } @Override }
class Record implements Comparable<Record> { private static final Comparator<Record> comparator = Comparator.comparing(Record::type) .thenComparing(Record::name) .thenComparing(Record::data); private final Type type; private final RecordName name; private final RecordData data; public Record(Type type, RecordName name, RecordData data) { this.type = Objects.requireNonNull(type, "type cannot be null"); this.name = Objects.requireNonNull(name, "name cannot be null"); this.data = Objects.requireNonNull(data, "data cannot be null"); } /** DNS type of this */ public Type type() { return type; } /** Data in this, e.g. IP address for records of type A */ public RecordData data() { return data; } /** Name of this, e.g. a FQDN for records of type A */ public RecordName name() { return name; } public enum Type { A, AAAA, ALIAS, CNAME, MX, NS, PTR, SOA, SRV, TXT } @Override public String toString() { return String.format("%s %s -> %s", type, name, data); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Record record = (Record) o; return type == record.type && name.equals(record.name) && data.equals(record.data); } @Override public int hashCode() { return Objects.hash(type, name, data); } @Override }
Thanks, fixed
public void createFromActiveSession() { PrepareResult result = deployApp(testApp); long sessionId = applicationRepository.createSessionFromExisting(applicationId(), new SilentDeployLogger(), false, timeoutBudget); long originalSessionId = result.sessionId(); ApplicationMetaData originalApplicationMetaData = getApplicationMetaData(applicationId(), originalSessionId); ApplicationMetaData applicationMetaData = getApplicationMetaData(applicationId(), sessionId); assertNotEquals(sessionId, originalSessionId); assertEquals(applicationMetaData.getApplicationName(), originalApplicationMetaData.getApplicationName()); assertEquals(applicationMetaData.getPreviousActiveGeneration(), originalApplicationMetaData.getGeneration().longValue()); assertNotEquals(applicationMetaData.getGeneration(), originalApplicationMetaData.getApplicationName()); assertEquals(applicationMetaData.getDeployedByUser(), originalApplicationMetaData.getDeployedByUser()); }
assertNotEquals(applicationMetaData.getGeneration(), originalApplicationMetaData.getApplicationName());
public void createFromActiveSession() { PrepareResult result = deployApp(testApp); long sessionId = applicationRepository.createSessionFromExisting(applicationId(), new SilentDeployLogger(), false, timeoutBudget); long originalSessionId = result.sessionId(); ApplicationMetaData originalApplicationMetaData = getApplicationMetaData(applicationId(), originalSessionId); ApplicationMetaData applicationMetaData = getApplicationMetaData(applicationId(), sessionId); assertNotEquals(sessionId, originalSessionId); assertEquals(applicationMetaData.getApplicationName(), originalApplicationMetaData.getApplicationName()); assertEquals(applicationMetaData.getPreviousActiveGeneration(), originalApplicationMetaData.getGeneration().longValue()); assertNotEquals(applicationMetaData.getGeneration(), originalApplicationMetaData.getGeneration()); assertEquals(applicationMetaData.getDeployedByUser(), originalApplicationMetaData.getDeployedByUser()); }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static File testAppLogServerWithContainer = new File("src/test/apps/app-logserver-with-container"); private final static TenantName tenant1 = TenantName.from("test1"); private final static TenantName tenant2 = TenantName.from("test2"); private final static TenantName tenant3 = TenantName.from("test3"); private final static Clock clock = Clock.systemUTC(); private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private SessionHandlerTest.MockProvisioner provisioner; private OrchestratorMock orchestrator; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenant1); tenantRepository.addTenant(tenant2); tenantRepository.addTenant(tenant3); orchestrator = new OrchestratorMock(); provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); 
assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test @Test public void testSuspension() { deployApp(testApp); assertFalse(applicationRepository.isSuspended(applicationId())); orchestrator.suspend(applicationId()); assertTrue(applicationRepository.isSuspended(applicationId())); } @Test public void getLogs() { WireMockServer wireMock = new WireMockServer(wireMockConfig().port(8080)); wireMock.start(); WireMock.configureFor("localhost", wireMock.port()); stubFor(get(urlEqualTo("/logs")) .willReturn(aResponse() .withStatus(200))); wireMock.start(); deployApp(testAppLogServerWithContainer); HttpResponse response = applicationRepository.getLogs(applicationId(), ""); assertEquals(200, response.getStatus()); wireMock.stop(); } @Test public void deleteUnusedTenants() { Instant now = ManualClock.at("1970-01-01T01:00:00"); deployApp(testApp); deployApp(testApp, new PrepareParams.Builder().applicationId(applicationId(tenant2)).build()); Duration ttlForUnusedTenant = Duration.ofHours(1); assertTrue(applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).isEmpty()); ttlForUnusedTenant = Duration.ofMillis(1); assertEquals(tenant3, applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).iterator().next()); applicationRepository.delete(applicationId()); Set<TenantName> tenantsDeleted = applicationRepository.deleteUnusedTenants(Duration.ofMillis(1), now); assertTrue(tenantsDeleted.contains(tenant1)); assertFalse(tenantsDeleted.contains(tenant2)); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", 
"routing", "default"); ApplicationId testerApp = ApplicationId.from("tenant1", "application1", "default-t"); Version sessionVersion = new Version(Vtag.currentVersion.getMajor(), 0); Version oldSessionVersion = Version.fromString("5.0"); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, sessionVersion, false)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, false)); assertEquals(oldSessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, oldSessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, true)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, sessionVersion, false)); } @Test public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = createFilereferenceOnDisk(new File(fileReferencesDir, "foo"), Instant.now().minus(Duration.ofDays(15))); File filereferenceDir2 = createFilereferenceOnDisk(new File(fileReferencesDir, "baz"), Instant.now()); tenantRepository.addTenant(tenant1); Provisioner provisioner = new 
SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); assertTrue(filereferenceDir2.exists()); } private File createFilereferenceOnDisk(File filereferenceDir, Instant lastModifiedTime) { assertTrue(filereferenceDir.mkdir()); File bar = new File(filereferenceDir, "file"); IOUtils.writeFile(bar, Utf8.toBytes("test")); assertTrue(filereferenceDir.setLastModified(lastModifiedTime.toEpochMilli())); return filereferenceDir; } @Test public void delete() { { PrepareResult result = deployApp(testApp); long sessionId = result.sessionId(); Tenant tenant = tenantRepository.getTenant(applicationId().tenant()); LocalSession applicationData = tenant.getLocalSessionRepo().getSession(sessionId); assertNotNull(applicationData); assertNotNull(applicationData.getApplicationId()); assertNotNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.delete(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertNull(tenant.getLocalSessionRepo().getSession(sessionId)); assertNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertTrue(provisioner.removed); assertThat(provisioner.lastApplicationId.tenant(), is(tenant.getName())); assertThat(provisioner.lastApplicationId, is(applicationId())); assertFalse(applicationRepository.delete(applicationId())); } { deployApp(testApp); 
assertTrue(applicationRepository.delete(applicationId())); deployApp(testApp); ApplicationId fooId = applicationId(tenant2); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); deployApp(testApp, prepareParams2); assertNotNull(applicationRepository.getActiveSession(fooId)); assertTrue(applicationRepository.delete(fooId)); assertThat(provisioner.lastApplicationId, is(fooId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.delete(applicationId())); } } @Test public void testDeletingInactiveSessions() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir().getAbsolutePath()) .configDefinitionsDir(Files.createTempDir().getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester(configserverConfig, clock); tester.deployApp("src/test/apps/app", clock.instant()); clock.advance(Duration.ofSeconds(10)); Optional<Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); LocalSession deployment3session = ((com.yahoo.vespa.config.server.deploy.Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId())); assertEquals(3, tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); tester.applicationRepository().deleteExpiredLocalSessions(); final 
Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(1, sessions.size()); assertEquals(3, new ArrayList<>(sessions).get(0).getSessionId()); assertEquals(0, applicationRepository.deleteExpiredRemoteSessions(Duration.ofSeconds(0))); } private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); ApplicationId applicationId = applicationId(); long sessionId = applicationRepository.createSession(applicationId, timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenantRepository.getTenant(applicationId.tenant()), sessionId, prepareParams(), false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenant1, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationId applicationId(TenantName tenantName) { return ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationMetaData getApplicationMetaData(ApplicationId applicationId, long sessionId) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); return applicationRepository.getMetadataFromSession(tenant, sessionId); } }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static File testAppLogServerWithContainer = new File("src/test/apps/app-logserver-with-container"); private final static TenantName tenant1 = TenantName.from("test1"); private final static TenantName tenant2 = TenantName.from("test2"); private final static TenantName tenant3 = TenantName.from("test3"); private final static Clock clock = Clock.systemUTC(); private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private SessionHandlerTest.MockProvisioner provisioner; private OrchestratorMock orchestrator; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenant1); tenantRepository.addTenant(tenant2); tenantRepository.addTenant(tenant3); orchestrator = new OrchestratorMock(); provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); 
assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test @Test public void testSuspension() { deployApp(testApp); assertFalse(applicationRepository.isSuspended(applicationId())); orchestrator.suspend(applicationId()); assertTrue(applicationRepository.isSuspended(applicationId())); } @Test public void getLogs() { WireMockServer wireMock = new WireMockServer(wireMockConfig().port(8080)); wireMock.start(); WireMock.configureFor("localhost", wireMock.port()); stubFor(get(urlEqualTo("/logs")) .willReturn(aResponse() .withStatus(200))); wireMock.start(); deployApp(testAppLogServerWithContainer); HttpResponse response = applicationRepository.getLogs(applicationId(), ""); assertEquals(200, response.getStatus()); wireMock.stop(); } @Test public void deleteUnusedTenants() { Instant now = ManualClock.at("1970-01-01T01:00:00"); deployApp(testApp); deployApp(testApp, new PrepareParams.Builder().applicationId(applicationId(tenant2)).build()); Duration ttlForUnusedTenant = Duration.ofHours(1); assertTrue(applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).isEmpty()); ttlForUnusedTenant = Duration.ofMillis(1); assertEquals(tenant3, applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).iterator().next()); applicationRepository.delete(applicationId()); Set<TenantName> tenantsDeleted = applicationRepository.deleteUnusedTenants(Duration.ofMillis(1), now); assertTrue(tenantsDeleted.contains(tenant1)); assertFalse(tenantsDeleted.contains(tenant2)); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", 
"routing", "default"); ApplicationId testerApp = ApplicationId.from("tenant1", "application1", "default-t"); Version sessionVersion = new Version(Vtag.currentVersion.getMajor(), 0); Version oldSessionVersion = Version.fromString("5.0"); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, sessionVersion, false)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, false)); assertEquals(oldSessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, oldSessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, true)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, sessionVersion, false)); } @Test public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = createFilereferenceOnDisk(new File(fileReferencesDir, "foo"), Instant.now().minus(Duration.ofDays(15))); File filereferenceDir2 = createFilereferenceOnDisk(new File(fileReferencesDir, "baz"), Instant.now()); tenantRepository.addTenant(tenant1); Provisioner provisioner = new 
SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); assertTrue(filereferenceDir2.exists()); } private File createFilereferenceOnDisk(File filereferenceDir, Instant lastModifiedTime) { assertTrue(filereferenceDir.mkdir()); File bar = new File(filereferenceDir, "file"); IOUtils.writeFile(bar, Utf8.toBytes("test")); assertTrue(filereferenceDir.setLastModified(lastModifiedTime.toEpochMilli())); return filereferenceDir; } @Test public void delete() { { PrepareResult result = deployApp(testApp); long sessionId = result.sessionId(); Tenant tenant = tenantRepository.getTenant(applicationId().tenant()); LocalSession applicationData = tenant.getLocalSessionRepo().getSession(sessionId); assertNotNull(applicationData); assertNotNull(applicationData.getApplicationId()); assertNotNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.delete(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertNull(tenant.getLocalSessionRepo().getSession(sessionId)); assertNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertTrue(provisioner.removed); assertThat(provisioner.lastApplicationId.tenant(), is(tenant.getName())); assertThat(provisioner.lastApplicationId, is(applicationId())); assertFalse(applicationRepository.delete(applicationId())); } { deployApp(testApp); 
assertTrue(applicationRepository.delete(applicationId())); deployApp(testApp); ApplicationId fooId = applicationId(tenant2); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); deployApp(testApp, prepareParams2); assertNotNull(applicationRepository.getActiveSession(fooId)); assertTrue(applicationRepository.delete(fooId)); assertThat(provisioner.lastApplicationId, is(fooId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.delete(applicationId())); } } @Test public void testDeletingInactiveSessions() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir().getAbsolutePath()) .configDefinitionsDir(Files.createTempDir().getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester(configserverConfig, clock); tester.deployApp("src/test/apps/app", clock.instant()); clock.advance(Duration.ofSeconds(10)); Optional<Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); LocalSession deployment3session = ((com.yahoo.vespa.config.server.deploy.Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId())); assertEquals(3, tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); tester.applicationRepository().deleteExpiredLocalSessions(); final 
Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(1, sessions.size()); assertEquals(3, new ArrayList<>(sessions).get(0).getSessionId()); assertEquals(0, applicationRepository.deleteExpiredRemoteSessions(Duration.ofSeconds(0))); } private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); ApplicationId applicationId = applicationId(); long sessionId = applicationRepository.createSession(applicationId, timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenantRepository.getTenant(applicationId.tenant()), sessionId, prepareParams(), false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenant1, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationId applicationId(TenantName tenantName) { return ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationMetaData getApplicationMetaData(ApplicationId applicationId, long sessionId) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); return applicationRepository.getMetadataFromSession(tenant, sessionId); } }
Follows the same argument as remove() and merge(). The detailed tests should be moved to the tensor library. Only the integration in applyTo() should be tested here.
public void apply_modify_update_operations() { assertApplyTo("tensor(x{},y{})", Operation.REPLACE, "{{x:0,y:0}:1, {x:0,y:1}:2}", "{{x:0,y:1}:0}", "{{x:0,y:0}:1,{x:0,y:1}:0}"); assertApplyTo("tensor(x{},y{})", Operation.ADD, "{{x:0,y:0}:1, {x:0,y:1}:2}", "{{x:0,y:1}:3}", "{{x:0,y:0}:1,{x:0,y:1}:5}"); assertApplyTo("tensor(x{},y{})", Operation.MULTIPLY, "{{x:0,y:0}:3, {x:0,y:1}:2}", "{{x:0,y:1}:3}", "{{x:0,y:0}:3,{x:0,y:1}:6}"); assertApplyTo("tensor(x[1],y[2])", Operation.REPLACE, "{{x:0,y:0}:1, {x:0,y:1}:2}", "{{x:0,y:1}:0}", "{{x:0,y:0}:1,{x:0,y:1}:0}"); assertApplyTo("tensor(x[1],y[2])", Operation.ADD, "{{x:0,y:0}:1, {x:0,y:1}:2}", "{{x:0,y:1}:3}", "{{x:0,y:0}:1,{x:0,y:1}:5}"); assertApplyTo("tensor(x[1],y[2])", Operation.MULTIPLY, "{{x:0,y:0}:3, {x:0,y:1}:2}", "{{x:0,y:1}:3}", "{{x:0,y:0}:3,{x:0,y:1}:6}"); assertApplyTo("tensor(x{},y[2])", Operation.REPLACE, "{{x:0,y:0}:1, {x:0,y:1}:2}", "{{x:0,y:1}:0}", "{{x:0,y:0}:1,{x:0,y:1}:0}"); assertApplyTo("tensor(x{},y[2])", Operation.ADD, "{{x:0,y:0}:1, {x:0,y:1}:2}", "{{x:0,y:1}:3}", "{{x:0,y:0}:1,{x:0,y:1}:5}"); assertApplyTo("tensor(x{},y[2])", Operation.MULTIPLY, "{{x:0,y:0}:3, {x:0,y:1}:2}", "{{x:0,y:1}:3}", "{{x:0,y:0}:3,{x:0,y:1}:6}"); }
assertApplyTo("tensor(x{},y[2])", Operation.REPLACE,
public void apply_modify_update_operations() { assertApplyTo("tensor(x{},y{})", Operation.REPLACE, "{{x:0,y:0}:1, {x:0,y:1}:2}", "{{x:0,y:1}:0}", "{{x:0,y:0}:1,{x:0,y:1}:0}"); assertApplyTo("tensor(x[1],y[2])", Operation.ADD, "{{x:0,y:0}:1, {x:0,y:1}:2}", "{{x:0,y:1}:3}", "{{x:0,y:0}:1,{x:0,y:1}:5}"); assertApplyTo("tensor(x{},y[2])", Operation.MULTIPLY, "{{x:0,y:0}:3, {x:0,y:1}:2}", "{{x:0,y:1}:3}", "{{x:0,y:0}:3,{x:0,y:1}:6}"); }
class TensorModifyUpdateTest { @Rule public ExpectedException exception = ExpectedException.none(); @Test public void convert_to_compatible_type_with_only_mapped_dimensions() { assertConvertToCompatible("tensor(x{})", "tensor(x[])"); assertConvertToCompatible("tensor(x{})", "tensor(x[10])"); assertConvertToCompatible("tensor(x{})", "tensor(x{})"); assertConvertToCompatible("tensor(x{},y{},z{})", "tensor(x[],y[10],z{})"); assertConvertToCompatible("tensor(x{},y{})", "tensor(x{},y[3])"); } private static void assertConvertToCompatible(String expectedType, String inputType) { assertEquals(expectedType, TensorModifyUpdate.convertDimensionsToMapped(TensorType.fromSpec(inputType)).toString()); } @Test public void use_of_incompatible_tensor_type_throws() { exception.expect(IllegalArgumentException.class); exception.expectMessage("Tensor type 'tensor(x[3])' is not compatible as it contains some indexed dimensions"); new TensorModifyUpdate(TensorModifyUpdate.Operation.REPLACE, new TensorFieldValue(Tensor.from("tensor(x[3])", "{{x:1}:3}"))); } @Test private void assertApplyTo(String spec, Operation op, String init, String update, String expected) { TensorFieldValue initialFieldValue = new TensorFieldValue(Tensor.from(spec, init)); TensorModifyUpdate modifyUpdate = new TensorModifyUpdate(op, new TensorFieldValue(Tensor.from("tensor(x{},y{})", update))); TensorFieldValue updatedFieldValue = (TensorFieldValue) modifyUpdate.applyTo(initialFieldValue); assertEquals(Tensor.from(spec, expected), updatedFieldValue.getTensor().get()); } }
class TensorModifyUpdateTest { @Rule public ExpectedException exception = ExpectedException.none(); @Test public void convert_to_compatible_type_with_only_mapped_dimensions() { assertConvertToCompatible("tensor(x{})", "tensor(x[])"); assertConvertToCompatible("tensor(x{})", "tensor(x[10])"); assertConvertToCompatible("tensor(x{})", "tensor(x{})"); assertConvertToCompatible("tensor(x{},y{},z{})", "tensor(x[],y[10],z{})"); assertConvertToCompatible("tensor(x{},y{})", "tensor(x{},y[3])"); } private static void assertConvertToCompatible(String expectedType, String inputType) { assertEquals(expectedType, TensorModifyUpdate.convertDimensionsToMapped(TensorType.fromSpec(inputType)).toString()); } @Test public void use_of_incompatible_tensor_type_throws() { exception.expect(IllegalArgumentException.class); exception.expectMessage("Tensor type 'tensor(x[3])' is not compatible as it contains some indexed dimensions"); new TensorModifyUpdate(TensorModifyUpdate.Operation.REPLACE, new TensorFieldValue(Tensor.from("tensor(x[3])", "{{x:1}:3}"))); } @Test private void assertApplyTo(String spec, Operation op, String init, String update, String expected) { TensorFieldValue initialFieldValue = new TensorFieldValue(Tensor.from(spec, init)); TensorModifyUpdate modifyUpdate = new TensorModifyUpdate(op, new TensorFieldValue(Tensor.from("tensor(x{},y{})", update))); TensorFieldValue updatedFieldValue = (TensorFieldValue) modifyUpdate.applyTo(initialFieldValue); assertEquals(Tensor.from(spec, expected), updatedFieldValue.getTensor().get()); } }
This seems to be something closer to an `Event`? Consider renaming. That would also make for nicer signatures imo, e.g. `Notifications#emailAddressesFor(Event)`.
private void sendNotification(Run run, DualLogger logger) { Application application = controller.applications().require(run.id().application()); Notifications notifications = application.deploymentSpec().notifications(); boolean newCommit = application.change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { if (run.status() == outOfCapacity && run.id().type().isProduction()) controller.mailer().send(mails.outOfCapacity(run.id(), recipients)); if (run.status() == deploymentFailed) controller.mailer().send(mails.deploymentFailure(run.id(), recipients)); if (run.status() == installationFailed) controller.mailer().send(mails.installationFailure(run.id(), recipients)); if (run.status() == testFailure) controller.mailer().send(mails.testFailure(run.id(), recipients)); if (run.status() == error) controller.mailer().send(mails.systemError(run.id(), recipients)); } catch (RuntimeException e) { logger.log(INFO, "Exception trying to send mail for " + run.id(), e); } }
When when = newCommit ? failingCommit : failing;
private void sendNotification(Run run, DualLogger logger) { Application application = controller.applications().require(run.id().application()); Notifications notifications = application.deploymentSpec().notifications(); boolean newCommit = application.change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { if (run.status() == outOfCapacity && run.id().type().isProduction()) controller.mailer().send(mails.outOfCapacity(run.id(), recipients)); if (run.status() == deploymentFailed) controller.mailer().send(mails.deploymentFailure(run.id(), recipients)); if (run.status() == installationFailed) controller.mailer().send(mails.installationFailure(run.id(), recipients)); if (run.status() == testFailure) controller.mailer().send(mails.testFailure(run.id(), recipients)); if (run.status() == error) controller.mailer().send(mails.systemError(run.id(), recipients)); } catch (RuntimeException e) { logger.log(INFO, "Exception trying to send mail for " + run.id(), e); } }
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final Duration endpointTimeout = Duration.ofMinutes(15); static final Duration installationTimeout = Duration.ofMinutes(150); private final Controller controller; private final DeploymentFailureMails mails; public InternalStepRunner(Controller controller) { this.controller = controller; this.mails = new DeploymentFailureMails(controller.zoneRegistry()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case deployTester: return deployTester(id, logger); case installReal: return installReal(id, logger); case installTester: return installTester(id, logger); case startTests: return startTests(id, logger); case endTests: return endTests(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.log(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected exception running " + id, e); if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application 
version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { return deploy(id.application(), id.type(), () -> controller.applications().deploy(id.application(), id.type().zone(controller.system()), Optional.empty(), new DeployOptions(false, Optional.empty(), false, setTheStage)), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(id.tester().id(), id.type(), () -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), new DeployOptions(true, Optional.of(platform), false, false)), logger); } private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) { List<String> messages = new ArrayList<>(); messages.add("Deploy failed due to non-compatible changes that require re-feed."); messages.add("Your options are:"); messages.add("1. Revert the incompatible changes."); messages.add("2. If you think it is safe in your case, you can override this validation, see"); messages.add(" http: messages.add("3. 
Deploy as a new application under a different name."); messages.add("Illegal actions:"); prepareResponse.configChangeActions.refeedActions.stream() .filter(action -> ! action.allowed) .flatMap(action -> action.messages.stream()) .forEach(messages::add); messages.add("Details:"); prepareResponse.log.stream() .map(entry -> entry.message) .forEach(messages::add); logger.log(messages); return Optional.of(deploymentFailed); } if (prepareResponse.configChangeActions.restartActions.isEmpty()) logger.log("No services requiring restart."); else prepareResponse.configChangeActions.restartActions.stream() .flatMap(action -> action.services.stream()) .map(service -> service.hostName) .sorted().distinct() .map(Hostname::new) .forEach(hostname -> { controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname)); logger.log("Restarting services on host " + hostname.id() + "."); }); logger.log("Deployment successful."); return Optional.of(running); } catch (ConfigServerException e) { if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest() || e.getErrorCode() == ACTIVATION_CONFLICT || e.getErrorCode() == APPLICATION_LOCK_FAILURE || e.getErrorCode() == PARENT_HOST_NOT_READY) { logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage()); return Optional.empty(); } if ( e.getErrorCode() == INVALID_APPLICATION_PACKAGE || e.getErrorCode() == BAD_REQUEST) { logger.log("Deployment failed: " + e.getMessage()); return Optional.of(deploymentFailed); } throw e; } } private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! 
deployment.isPresent()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); ApplicationVersion application = setTheStage ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); logger.log("Checking installation of " + platform + " and " + application.id() + " ..."); if ( nodesConverged(id.application(), id.type(), platform, logger) && servicesConverged(id.application(), id.type(), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } if (timedOut(deployment.get(), installationTimeout)) { logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!"); return Optional.of(installationFailed); } logger.log("Installation not yet complete."); return Optional.empty(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! 
deployment.isPresent()) { logger.log(WARNING, "Deployment expired before installation of tester was successful."); return Optional.of(error); } Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Checking installation of tester container ..."); if ( nodesConverged(id.tester().id(), id.type(), platform, logger) && servicesConverged(id.tester().id(), id.type(), logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (timedOut(deployment.get(), installationTimeout)) { logger.log(WARNING, "Installation of tester failed to complete within " + installationTimeout.toMinutes() + " minutes of real deployment!"); return Optional.of(error); } logger.log("Installation of tester not yet complete."); return Optional.empty(); } private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) { List<Node> nodes = controller.configServer().nodeRepository().list(type.zone(controller.system()), id, ImmutableSet.of(active, reserved)); List<String> statuses = nodes.stream() .map(node -> String.format("%70s: %-16s%-25s%-32s%s", node.hostname(), node.serviceState(), node.wantedVersion() + (node.currentVersion().equals(node.wantedVersion()) ? "" : " <-- " + node.currentVersion()), node.restartGeneration() >= node.wantedRestartGeneration() ? "" : "restart pending (" + node.wantedRestartGeneration() + " <-- " + node.restartGeneration() + ")", node.rebootGeneration() >= node.wantedRebootGeneration() ? 
"" : "reboot pending (" + node.wantedRebootGeneration() + " <-- " + node.rebootGeneration() + ")")) .collect(Collectors.toList()); logger.log(statuses); return nodes.stream().allMatch(node -> node.currentVersion().equals(target) && node.restartGeneration() >= node.wantedRestartGeneration() && node.rebootGeneration() >= node.wantedRebootGeneration()); } private boolean servicesConverged(ApplicationId id, JobType type, DualLogger logger) { Optional<ServiceConvergence> convergence = controller.configServer().serviceConvergence(new DeploymentId(id, type.zone(controller.system()))); if ( ! convergence.isPresent()) { logger.log("Config status not currently available -- will retry."); return false; } logger.log("Wanted config generation is " + convergence.get().wantedGeneration()); List<String> statuses = convergence.get().services().stream() .filter(serviceStatus -> serviceStatus.currentGeneration() != convergence.get().wantedGeneration()) .map(serviceStatus -> String.format("%70s: %11s on port %4d has %s", serviceStatus.host().value(), serviceStatus.type(), serviceStatus.port(), serviceStatus.currentGeneration() == -1 ? "not started!" : Long.toString(serviceStatus.currentGeneration()))) .collect(Collectors.toList()); logger.log(statuses); return convergence.get().converged(); } private Optional<RunStatus> startTests(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! deployment.isPresent()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(aborted); } Set<ZoneId> zones = testedZoneAndProductionZones(id); logger.log("Attempting to find endpoints ..."); Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones); List<String> messages = new ArrayList<>(); messages.add("Found endpoints"); endpoints.forEach((zone, uris) -> { messages.add("- " + zone); uris.forEach(uri -> messages.add(" |-- " + uri)); }); logger.log(messages); if ( ! 
endpoints.containsKey(id.type().zone(controller.system()))) { if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!"); return Optional.of(error); } logger.log("Endpoints for the deployment to test are not yet ready."); return Optional.empty(); } Map<ZoneId, List<String>> clusters = listClusters(id.application(), zones); Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if (testerEndpoint.isPresent() && controller.jobController().cloud().ready(testerEndpoint.get())) { logger.log("Starting tests ..."); controller.jobController().cloud().startTests(testerEndpoint.get(), TesterCloud.Suite.of(id.type()), testConfig(id.application(), id.type().zone(controller.system()), controller.system(), endpoints, clusters)); return Optional.of(running); } if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoint for tester failed to show up within " + endpointTimeout.toMinutes() + " minutes of real deployment!"); return Optional.of(error); } logger.log("Endpoints of tester container not yet available."); return Optional.empty(); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if ( ! deployment(id.application(), id.type()).isPresent()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if ( ! 
testerEndpoint.isPresent()) { logger.log("Endpoints for tester not found -- trying again later."); return Optional.empty(); } controller.jobController().updateTestLog(id); RunStatus status; TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get()); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); status = testFailure; break; case ERROR: logger.log(INFO, "Tester failed running its tests!"); status = error; break; case SUCCESS: logger.log("Tests completed successfully."); status = running; break; default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } return Optional.of(status); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { JobReport report = JobReport.ofJob(run.id().application(), run.id().type(), run.id().number(), run.hasFailed() ? 
Optional.of(DeploymentJobs.JobError.unknown) : Optional.empty()); controller.applications().deploymentTrigger().notifyOfCompletion(report); if (run.hasFailed()) sendNotification(run, logger); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "'no longer supposed to run?:", e); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Application application(ApplicationId id) { return controller.applications().require(id); } /** Returns whether the time elapsed since the last real deployment in the given zone is more than the given timeout. */ private boolean timedOut(Deployment deployment, Duration timeout) { return deployment.at().isBefore(controller.clock().instant().minus(timeout)); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. 
*/ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); byte[] testPackage = controller.applications().applicationStore().get(id.tester(), version); byte[] servicesXml = servicesXml(controller.system()); DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); byte[] deploymentXml = deploymentXml(spec.athenzDomain(), spec.athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } /** Returns a stream containing the zone of the deployment tested in the given run, and all production zones for the application. */ private Set<ZoneId> testedZoneAndProductionZones(RunId id) { return Stream.concat(Stream.of(id.type().zone(controller.system())), application(id.application()).productionDeployments().keySet().stream()) .collect(Collectors.toSet()); } /** Returns all endpoints for all current deployments of the given real application. */ private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<URI>> deployments = ImmutableMap.builder(); for (ZoneId zone : zones) controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone)) .filter(endpoints -> ! endpoints.isEmpty()) .ifPresent(endpoints -> deployments.put(zone, endpoints)); return deployments.build(); } /** Returns all content clusters in all current deployments of the given real application. 
*/ private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<String>> clusters = ImmutableMap.builder(); for (ZoneId zone : zones) clusters.put(zone, ImmutableList.copyOf(controller.configServer().getContentClusters(new DeploymentId(id, zone)))); return clusters.build(); } /** Returns the generated services.xml content for the tester application. */ static byte[] servicesXml(SystemName systemName) { String domain = systemName == SystemName.main ? "vespa.vespa" : "vespa.vespa.cd"; String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='default'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n" + "\n" + " <http>\n" + " <server id='default' port='4080'/>\n" + " <filtering>\n" + " <access-control domain='" + domain + "'>\n" + " <exclude>\n" + " <binding>http: " </exclude>\n" + " </access-control>\n" + " <request-chain id=\"testrunner-api\">\n" + " <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" + " <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" + " <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" + " </config>\n" + " <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" + " 
<resourceName>" + domain + ":tester-application</resourceName>\n" + " <action>deploy</action>\n" + " </config>\n" + " </component>\n" + " </filter>\n" + " </request-chain>\n" + " </filtering>\n" + " </http>\n" + "\n" + " <nodes count=\"1\" flavor=\"d-1-4-50\" />\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + "/>"; return deploymentSpec.getBytes(StandardCharsets.UTF_8); } /** Returns the config for the tests to run for the given job. */ private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system, Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("application", id.serializedForm()); root.setString("zone", testerZone.value()); root.setString("system", system.name()); Cursor endpointsObject = root.setObject("endpoints"); deployments.forEach((zone, endpoints) -> { Cursor endpointArray = endpointsObject.setArray(zone.value()); for (URI endpoint : endpoints) endpointArray.addString(endpoint.toString()); }); Cursor clustersObject = root.setObject("clusters"); clusters.forEach((zone, clusterList) -> { Cursor clusterArray = clustersObject.setArray(zone.value()); for (String cluster : clusterList) clusterArray.addString(cluster); }); try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. 
*/ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... messages) { log(List.of(messages)); } private void log(List<String> messages) { controller.jobController().log(id, step, DEBUG, messages); } private void log(Level level, String message) { log(level, message, null); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } }
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final Duration endpointTimeout = Duration.ofMinutes(15); static final Duration installationTimeout = Duration.ofMinutes(150); private final Controller controller; private final DeploymentFailureMails mails; public InternalStepRunner(Controller controller) { this.controller = controller; this.mails = new DeploymentFailureMails(controller.zoneRegistry()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case deployTester: return deployTester(id, logger); case installReal: return installReal(id, logger); case installTester: return installTester(id, logger); case startTests: return startTests(id, logger); case endTests: return endTests(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.log(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected exception running " + id, e); if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application 
version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { return deploy(id.application(), id.type(), () -> controller.applications().deploy(id.application(), id.type().zone(controller.system()), Optional.empty(), new DeployOptions(false, Optional.empty(), false, setTheStage)), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(id.tester().id(), id.type(), () -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), new DeployOptions(true, Optional.of(platform), false, false)), logger); } private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) { List<String> messages = new ArrayList<>(); messages.add("Deploy failed due to non-compatible changes that require re-feed."); messages.add("Your options are:"); messages.add("1. Revert the incompatible changes."); messages.add("2. If you think it is safe in your case, you can override this validation, see"); messages.add(" http: messages.add("3. 
Deploy as a new application under a different name."); messages.add("Illegal actions:"); prepareResponse.configChangeActions.refeedActions.stream() .filter(action -> ! action.allowed) .flatMap(action -> action.messages.stream()) .forEach(messages::add); messages.add("Details:"); prepareResponse.log.stream() .map(entry -> entry.message) .forEach(messages::add); logger.log(messages); return Optional.of(deploymentFailed); } if (prepareResponse.configChangeActions.restartActions.isEmpty()) logger.log("No services requiring restart."); else prepareResponse.configChangeActions.restartActions.stream() .flatMap(action -> action.services.stream()) .map(service -> service.hostName) .sorted().distinct() .map(Hostname::new) .forEach(hostname -> { controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname)); logger.log("Restarting services on host " + hostname.id() + "."); }); logger.log("Deployment successful."); return Optional.of(running); } catch (ConfigServerException e) { if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest() || e.getErrorCode() == ACTIVATION_CONFLICT || e.getErrorCode() == APPLICATION_LOCK_FAILURE || e.getErrorCode() == PARENT_HOST_NOT_READY) { logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage()); return Optional.empty(); } if ( e.getErrorCode() == INVALID_APPLICATION_PACKAGE || e.getErrorCode() == BAD_REQUEST) { logger.log("Deployment failed: " + e.getMessage()); return Optional.of(deploymentFailed); } throw e; } } private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! 
deployment.isPresent()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); ApplicationVersion application = setTheStage ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); logger.log("Checking installation of " + platform + " and " + application.id() + " ..."); if ( nodesConverged(id.application(), id.type(), platform, logger) && servicesConverged(id.application(), id.type(), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } if (timedOut(deployment.get(), installationTimeout)) { logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!"); return Optional.of(installationFailed); } logger.log("Installation not yet complete."); return Optional.empty(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! 
deployment.isPresent()) { logger.log(WARNING, "Deployment expired before installation of tester was successful."); return Optional.of(error); } Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Checking installation of tester container ..."); if ( nodesConverged(id.tester().id(), id.type(), platform, logger) && servicesConverged(id.tester().id(), id.type(), logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (timedOut(deployment.get(), installationTimeout)) { logger.log(WARNING, "Installation of tester failed to complete within " + installationTimeout.toMinutes() + " minutes of real deployment!"); return Optional.of(error); } logger.log("Installation of tester not yet complete."); return Optional.empty(); } private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) { List<Node> nodes = controller.configServer().nodeRepository().list(type.zone(controller.system()), id, ImmutableSet.of(active, reserved)); List<String> statuses = nodes.stream() .map(node -> String.format("%70s: %-16s%-25s%-32s%s", node.hostname(), node.serviceState(), node.wantedVersion() + (node.currentVersion().equals(node.wantedVersion()) ? "" : " <-- " + node.currentVersion()), node.restartGeneration() >= node.wantedRestartGeneration() ? "" : "restart pending (" + node.wantedRestartGeneration() + " <-- " + node.restartGeneration() + ")", node.rebootGeneration() >= node.wantedRebootGeneration() ? 
"" : "reboot pending (" + node.wantedRebootGeneration() + " <-- " + node.rebootGeneration() + ")")) .collect(Collectors.toList()); logger.log(statuses); return nodes.stream().allMatch(node -> node.currentVersion().equals(target) && node.restartGeneration() >= node.wantedRestartGeneration() && node.rebootGeneration() >= node.wantedRebootGeneration()); } private boolean servicesConverged(ApplicationId id, JobType type, DualLogger logger) { Optional<ServiceConvergence> convergence = controller.configServer().serviceConvergence(new DeploymentId(id, type.zone(controller.system()))); if ( ! convergence.isPresent()) { logger.log("Config status not currently available -- will retry."); return false; } logger.log("Wanted config generation is " + convergence.get().wantedGeneration()); List<String> statuses = convergence.get().services().stream() .filter(serviceStatus -> serviceStatus.currentGeneration() != convergence.get().wantedGeneration()) .map(serviceStatus -> String.format("%70s: %11s on port %4d has %s", serviceStatus.host().value(), serviceStatus.type(), serviceStatus.port(), serviceStatus.currentGeneration() == -1 ? "not started!" : Long.toString(serviceStatus.currentGeneration()))) .collect(Collectors.toList()); logger.log(statuses); return convergence.get().converged(); } private Optional<RunStatus> startTests(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! deployment.isPresent()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(aborted); } Set<ZoneId> zones = testedZoneAndProductionZones(id); logger.log("Attempting to find endpoints ..."); Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones); List<String> messages = new ArrayList<>(); messages.add("Found endpoints"); endpoints.forEach((zone, uris) -> { messages.add("- " + zone); uris.forEach(uri -> messages.add(" |-- " + uri)); }); logger.log(messages); if ( ! 
endpoints.containsKey(id.type().zone(controller.system()))) { if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!"); return Optional.of(error); } logger.log("Endpoints for the deployment to test are not yet ready."); return Optional.empty(); } Map<ZoneId, List<String>> clusters = listClusters(id.application(), zones); Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if (testerEndpoint.isPresent() && controller.jobController().cloud().ready(testerEndpoint.get())) { logger.log("Starting tests ..."); controller.jobController().cloud().startTests(testerEndpoint.get(), TesterCloud.Suite.of(id.type()), testConfig(id.application(), id.type().zone(controller.system()), controller.system(), endpoints, clusters)); return Optional.of(running); } if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoint for tester failed to show up within " + endpointTimeout.toMinutes() + " minutes of real deployment!"); return Optional.of(error); } logger.log("Endpoints of tester container not yet available."); return Optional.empty(); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if ( ! deployment(id.application(), id.type()).isPresent()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if ( ! 
testerEndpoint.isPresent()) { logger.log("Endpoints for tester not found -- trying again later."); return Optional.empty(); } controller.jobController().updateTestLog(id); RunStatus status; TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get()); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); status = testFailure; break; case ERROR: logger.log(INFO, "Tester failed running its tests!"); status = error; break; case SUCCESS: logger.log("Tests completed successfully."); status = running; break; default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } return Optional.of(status); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { JobReport report = JobReport.ofJob(run.id().application(), run.id().type(), run.id().number(), run.hasFailed() ? 
Optional.of(DeploymentJobs.JobError.unknown) : Optional.empty()); controller.applications().deploymentTrigger().notifyOfCompletion(report); if (run.hasFailed()) sendNotification(run, logger); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "'no longer supposed to run?:", e); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Application application(ApplicationId id) { return controller.applications().require(id); } /** Returns whether the time elapsed since the last real deployment in the given zone is more than the given timeout. */ private boolean timedOut(Deployment deployment, Duration timeout) { return deployment.at().isBefore(controller.clock().instant().minus(timeout)); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. 
*/ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); byte[] testPackage = controller.applications().applicationStore().get(id.tester(), version); byte[] servicesXml = servicesXml(controller.system()); DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); byte[] deploymentXml = deploymentXml(spec.athenzDomain(), spec.athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } /** Returns a stream containing the zone of the deployment tested in the given run, and all production zones for the application. */ private Set<ZoneId> testedZoneAndProductionZones(RunId id) { return Stream.concat(Stream.of(id.type().zone(controller.system())), application(id.application()).productionDeployments().keySet().stream()) .collect(Collectors.toSet()); } /** Returns all endpoints for all current deployments of the given real application. */ private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<URI>> deployments = ImmutableMap.builder(); for (ZoneId zone : zones) controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone)) .filter(endpoints -> ! endpoints.isEmpty()) .ifPresent(endpoints -> deployments.put(zone, endpoints)); return deployments.build(); } /** Returns all content clusters in all current deployments of the given real application. 
*/ private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<String>> clusters = ImmutableMap.builder(); for (ZoneId zone : zones) clusters.put(zone, ImmutableList.copyOf(controller.configServer().getContentClusters(new DeploymentId(id, zone)))); return clusters.build(); } /** Returns the generated services.xml content for the tester application. */ static byte[] servicesXml(SystemName systemName) { String domain = systemName == SystemName.main ? "vespa.vespa" : "vespa.vespa.cd"; String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='default'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n" + "\n" + " <http>\n" + " <server id='default' port='4080'/>\n" + " <filtering>\n" + " <access-control domain='" + domain + "'>\n" + " <exclude>\n" + " <binding>http: " </exclude>\n" + " </access-control>\n" + " <request-chain id=\"testrunner-api\">\n" + " <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" + " <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" + " <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" + " </config>\n" + " <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" + " 
<resourceName>" + domain + ":tester-application</resourceName>\n" + " <action>deploy</action>\n" + " </config>\n" + " </component>\n" + " </filter>\n" + " </request-chain>\n" + " </filtering>\n" + " </http>\n" + "\n" + " <nodes count=\"1\" flavor=\"d-1-4-50\" />\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + "/>"; return deploymentSpec.getBytes(StandardCharsets.UTF_8); } /** Returns the config for the tests to run for the given job. */ private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system, Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("application", id.serializedForm()); root.setString("zone", testerZone.value()); root.setString("system", system.name()); Cursor endpointsObject = root.setObject("endpoints"); deployments.forEach((zone, endpoints) -> { Cursor endpointArray = endpointsObject.setArray(zone.value()); for (URI endpoint : endpoints) endpointArray.addString(endpoint.toString()); }); Cursor clustersObject = root.setObject("clusters"); clusters.forEach((zone, clusterList) -> { Cursor clusterArray = clustersObject.setArray(zone.value()); for (String cluster : clusterList) clusterArray.addString(cluster); }); try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. 
*/ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... messages) { log(List.of(messages)); } private void log(List<String> messages) { controller.jobController().log(id, step, DEBUG, messages); } private void log(Level level, String message) { log(level, message, null); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } }
It represents an XML attribute called `when`, and I think the two are similar enough it's OK to keep this name.
private void sendNotification(Run run, DualLogger logger) { Application application = controller.applications().require(run.id().application()); Notifications notifications = application.deploymentSpec().notifications(); boolean newCommit = application.change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { if (run.status() == outOfCapacity && run.id().type().isProduction()) controller.mailer().send(mails.outOfCapacity(run.id(), recipients)); if (run.status() == deploymentFailed) controller.mailer().send(mails.deploymentFailure(run.id(), recipients)); if (run.status() == installationFailed) controller.mailer().send(mails.installationFailure(run.id(), recipients)); if (run.status() == testFailure) controller.mailer().send(mails.testFailure(run.id(), recipients)); if (run.status() == error) controller.mailer().send(mails.systemError(run.id(), recipients)); } catch (RuntimeException e) { logger.log(INFO, "Exception trying to send mail for " + run.id(), e); } }
When when = newCommit ? failingCommit : failing;
private void sendNotification(Run run, DualLogger logger) { Application application = controller.applications().require(run.id().application()); Notifications notifications = application.deploymentSpec().notifications(); boolean newCommit = application.change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { if (run.status() == outOfCapacity && run.id().type().isProduction()) controller.mailer().send(mails.outOfCapacity(run.id(), recipients)); if (run.status() == deploymentFailed) controller.mailer().send(mails.deploymentFailure(run.id(), recipients)); if (run.status() == installationFailed) controller.mailer().send(mails.installationFailure(run.id(), recipients)); if (run.status() == testFailure) controller.mailer().send(mails.testFailure(run.id(), recipients)); if (run.status() == error) controller.mailer().send(mails.systemError(run.id(), recipients)); } catch (RuntimeException e) { logger.log(INFO, "Exception trying to send mail for " + run.id(), e); } }
// Runs a single step of a deployment job against the config server and the tester container,
// translating each step's outcome into an Optional<RunStatus> — empty means "not done yet,
// try again later".
// NOTE(review): this chunk appears mangled by extraction — whole methods are concatenated onto
// single lines and several string literals are truncated mid-text (e.g. the URL after "see" in
// deploy(), and the <binding> entries in servicesXml()). Code tokens below are preserved
// byte-for-byte, including those defects; restore formatting and the lost literal text from
// version control.
class InternalStepRunner implements StepRunner {
// Shared state: class logger, polling timeouts, the controller, and the failure-mail templates.
private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final Duration endpointTimeout = Duration.ofMinutes(15); static final Duration installationTimeout = Duration.ofMinutes(150); private final Controller controller; private final DeploymentFailureMails mails;
// Wires in the controller and the mail templates used for failure notifications.
public InternalStepRunner(Controller controller) { this.controller = controller; this.mails = new DeploymentFailureMails(controller.zoneRegistry()); }
// Dispatches on the step kind. IO problems mean "retry later"; other unexpected exceptions end
// the run with 'error', except for cleanup steps, which keep retrying.
@Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case deployTester: return deployTester(id, logger); case installReal: return installReal(id, logger); case installTester: return installTester(id, logger); case startTests: return startTests(id, logger); case endTests: return endTests(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.log(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected exception running " + id, e); if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } }
// Deploys the run's source (initial) platform and application versions.
private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application 
version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); }
// Deploys the run's target platform and application versions.
private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); }
// Common deployment path for the real application; setTheStage is forwarded to DeployOptions.
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { return deploy(id.application(), id.type(), () -> controller.applications().deploy(id.application(), id.type().zone(controller.system()), Optional.empty(), new DeployOptions(false, Optional.empty(), false, setTheStage)), logger); }
// Deploys the tester container application on the run's target platform.
private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(id.tester().id(), id.type(), () -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), new DeployOptions(true, Optional.of(platform), false, false)), logger); }
// Executes the given deployment, logging disallowed refeed actions (-> deploymentFailed) and
// triggering restarts for restart actions; maps known config-server error codes to retry
// (empty) or deploymentFailed, and rethrows anything else.
private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) { List<String> messages = new ArrayList<>(); messages.add("Deploy failed due to non-compatible changes that require re-feed."); messages.add("Your options are:"); messages.add("1. Revert the incompatible changes."); messages.add("2. If you think it is safe in your case, you can override this validation, see"); messages.add(" http: messages.add("3. 
Deploy as a new application under a different name."); messages.add("Illegal actions:"); prepareResponse.configChangeActions.refeedActions.stream() .filter(action -> ! action.allowed) .flatMap(action -> action.messages.stream()) .forEach(messages::add); messages.add("Details:"); prepareResponse.log.stream() .map(entry -> entry.message) .forEach(messages::add); logger.log(messages); return Optional.of(deploymentFailed); } if (prepareResponse.configChangeActions.restartActions.isEmpty()) logger.log("No services requiring restart."); else prepareResponse.configChangeActions.restartActions.stream() .flatMap(action -> action.services.stream()) .map(service -> service.hostName) .sorted().distinct() .map(Hostname::new) .forEach(hostname -> { controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname)); logger.log("Restarting services on host " + hostname.id() + "."); }); logger.log("Deployment successful."); return Optional.of(running); } catch (ConfigServerException e) { if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest() || e.getErrorCode() == ACTIVATION_CONFLICT || e.getErrorCode() == APPLICATION_LOCK_FAILURE || e.getErrorCode() == PARENT_HOST_NOT_READY) { logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage()); return Optional.empty(); } if ( e.getErrorCode() == INVALID_APPLICATION_PACKAGE || e.getErrorCode() == BAD_REQUEST) { logger.log("Deployment failed: " + e.getMessage()); return Optional.of(deploymentFailed); } throw e; } }
// Installation polling for the initial (source) versions.
private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); }
// Installation polling for the target versions.
private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); }
// Polls until nodes and services converge on the expected platform and application versions,
// or the deployment's age exceeds installationTimeout (-> installationFailed).
private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! 
deployment.isPresent()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); ApplicationVersion application = setTheStage ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); logger.log("Checking installation of " + platform + " and " + application.id() + " ..."); if ( nodesConverged(id.application(), id.type(), platform, logger) && servicesConverged(id.application(), id.type(), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } if (timedOut(deployment.get(), installationTimeout)) { logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!"); return Optional.of(installationFailed); } logger.log("Installation not yet complete."); return Optional.empty(); }
// Same polling for the tester container; a timeout here ends the run with 'error'.
private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! 
deployment.isPresent()) { logger.log(WARNING, "Deployment expired before installation of tester was successful."); return Optional.of(error); } Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Checking installation of tester container ..."); if ( nodesConverged(id.tester().id(), id.type(), platform, logger) && servicesConverged(id.tester().id(), id.type(), logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (timedOut(deployment.get(), installationTimeout)) { logger.log(WARNING, "Installation of tester failed to complete within " + installationTimeout.toMinutes() + " minutes of real deployment!"); return Optional.of(error); } logger.log("Installation of tester not yet complete."); return Optional.empty(); }
// Logs per-node status and reports whether all active/reserved nodes are on the target version
// with no pending restart or reboot generations.
private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) { List<Node> nodes = controller.configServer().nodeRepository().list(type.zone(controller.system()), id, ImmutableSet.of(active, reserved)); List<String> statuses = nodes.stream() .map(node -> String.format("%70s: %-16s%-25s%-32s%s", node.hostname(), node.serviceState(), node.wantedVersion() + (node.currentVersion().equals(node.wantedVersion()) ? "" : " <-- " + node.currentVersion()), node.restartGeneration() >= node.wantedRestartGeneration() ? "" : "restart pending (" + node.wantedRestartGeneration() + " <-- " + node.restartGeneration() + ")", node.rebootGeneration() >= node.wantedRebootGeneration() ? 
"" : "reboot pending (" + node.wantedRebootGeneration() + " <-- " + node.rebootGeneration() + ")")) .collect(Collectors.toList()); logger.log(statuses); return nodes.stream().allMatch(node -> node.currentVersion().equals(target) && node.restartGeneration() >= node.wantedRestartGeneration() && node.rebootGeneration() >= node.wantedRebootGeneration()); }
// Logs services lagging behind the wanted config generation and reports whether service
// convergence is complete; absent convergence data means "retry".
private boolean servicesConverged(ApplicationId id, JobType type, DualLogger logger) { Optional<ServiceConvergence> convergence = controller.configServer().serviceConvergence(new DeploymentId(id, type.zone(controller.system()))); if ( ! convergence.isPresent()) { logger.log("Config status not currently available -- will retry."); return false; } logger.log("Wanted config generation is " + convergence.get().wantedGeneration()); List<String> statuses = convergence.get().services().stream() .filter(serviceStatus -> serviceStatus.currentGeneration() != convergence.get().wantedGeneration()) .map(serviceStatus -> String.format("%70s: %11s on port %4d has %s", serviceStatus.host().value(), serviceStatus.type(), serviceStatus.port(), serviceStatus.currentGeneration() == -1 ? "not started!" : Long.toString(serviceStatus.currentGeneration()))) .collect(Collectors.toList()); logger.log(statuses); return convergence.get().converged(); }
// Finds endpoints for the tested and production zones and, once the tester is ready, starts
// the test suite; endpoint discovery is bounded by endpointTimeout.
private Optional<RunStatus> startTests(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! deployment.isPresent()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(aborted); } Set<ZoneId> zones = testedZoneAndProductionZones(id); logger.log("Attempting to find endpoints ..."); Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones); List<String> messages = new ArrayList<>(); messages.add("Found endpoints"); endpoints.forEach((zone, uris) -> { messages.add("- " + zone); uris.forEach(uri -> messages.add(" |-- " + uri)); }); logger.log(messages); if ( ! 
endpoints.containsKey(id.type().zone(controller.system()))) { if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!"); return Optional.of(error); } logger.log("Endpoints for the deployment to test are not yet ready."); return Optional.empty(); } Map<ZoneId, List<String>> clusters = listClusters(id.application(), zones); Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if (testerEndpoint.isPresent() && controller.jobController().cloud().ready(testerEndpoint.get())) { logger.log("Starting tests ..."); controller.jobController().cloud().startTests(testerEndpoint.get(), TesterCloud.Suite.of(id.type()), testConfig(id.application(), id.type().zone(controller.system()), controller.system(), endpoints, clusters)); return Optional.of(running); } if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoint for tester failed to show up within " + endpointTimeout.toMinutes() + " minutes of real deployment!"); return Optional.of(error); } logger.log("Endpoints of tester container not yet available."); return Optional.empty(); }
// Polls the tester for test progress and maps its reported status to a run status.
private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if ( ! deployment(id.application(), id.type()).isPresent()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if ( ! 
testerEndpoint.isPresent()) { logger.log("Endpoints for tester not found -- trying again later."); return Optional.empty(); } controller.jobController().updateTestLog(id); RunStatus status; TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get()); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); status = testFailure; break; case ERROR: logger.log(INFO, "Tester failed running its tests!"); status = error; break; case SUCCESS: logger.log("Tests completed successfully."); status = running; break; default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } return Optional.of(status); }
// Deactivates the real deployment in the job's zone.
private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); }
// Deactivates the tester deployment for the job.
private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); }
// Reports the run's outcome to the deployment trigger, and mails a notification if it failed.
private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { JobReport report = JobReport.ofJob(run.id().application(), run.id().type(), run.id().number(), run.hasFailed() ? 
Optional.of(DeploymentJobs.JobError.unknown) : Optional.empty()); controller.applications().deploymentTrigger().notifyOfCompletion(report); if (run.hasFailed()) sendNotification(run, logger); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "'no longer supposed to run?:", e); } return Optional.of(running); }
// NOTE(review): orphaned javadoc — the sendNotification body it documents sits outside this
// class in the mangled chunk; reattach when restoring the file.
/** Sends a mail with a notification of a failed run, if one should be sent. */ /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); }
/** Returns the real application with the given id. */ private Application application(ApplicationId id) { return controller.applications().require(id); }
/** Returns whether the time elapsed since the last real deployment in the given zone is more than the given timeout. */ private boolean timedOut(Deployment deployment, Duration timeout) { return deployment.at().isBefore(controller.clock().instant().minus(timeout)); }
/** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. 
*/ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); byte[] testPackage = controller.applications().applicationStore().get(id.tester(), version); byte[] servicesXml = servicesXml(controller.system()); DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); byte[] deploymentXml = deploymentXml(spec.athenzDomain(), spec.athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } }
/** Returns a stream containing the zone of the deployment tested in the given run, and all production zones for the application. */ private Set<ZoneId> testedZoneAndProductionZones(RunId id) { return Stream.concat(Stream.of(id.type().zone(controller.system())), application(id.application()).productionDeployments().keySet().stream()) .collect(Collectors.toSet()); }
/** Returns all endpoints for all current deployments of the given real application. */ private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<URI>> deployments = ImmutableMap.builder(); for (ZoneId zone : zones) controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone)) .filter(endpoints -> ! endpoints.isEmpty()) .ifPresent(endpoints -> deployments.put(zone, endpoints)); return deployments.build(); }
/** Returns all content clusters in all current deployments of the given real application. 
*/ private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<String>> clusters = ImmutableMap.builder(); for (ZoneId zone : zones) clusters.put(zone, ImmutableList.copyOf(controller.configServer().getContentClusters(new DeploymentId(id, zone)))); return clusters.build(); }
/** Returns the generated services.xml content for the tester application. */ static byte[] servicesXml(SystemName systemName) { String domain = systemName == SystemName.main ? "vespa.vespa" : "vespa.vespa.cd"; String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='default'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n" + "\n" + " <http>\n" + " <server id='default' port='4080'/>\n" + " <filtering>\n" + " <access-control domain='" + domain + "'>\n" + " <exclude>\n" + " <binding>http: " </exclude>\n" + " </access-control>\n" + " <request-chain id=\"testrunner-api\">\n" + " <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" + " <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" + " <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" + " </config>\n" + " <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" + " 
<resourceName>" + domain + ":tester-application</resourceName>\n" + " <action>deploy</action>\n" + " </config>\n" + " </component>\n" + " </filter>\n" + " </request-chain>\n" + " </filtering>\n" + " </http>\n" + "\n" + " <nodes count=\"1\" flavor=\"d-1-4-50\" />\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(); }
/** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + "/>"; return deploymentSpec.getBytes(StandardCharsets.UTF_8); }
/** Returns the config for the tests to run for the given job. */ private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system, Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("application", id.serializedForm()); root.setString("zone", testerZone.value()); root.setString("system", system.name()); Cursor endpointsObject = root.setObject("endpoints"); deployments.forEach((zone, endpoints) -> { Cursor endpointArray = endpointsObject.setArray(zone.value()); for (URI endpoint : endpoints) endpointArray.addString(endpoint.toString()); }); Cursor clustersObject = root.setObject("clusters"); clusters.forEach((zone, clusterList) -> { Cursor clusterArray = clustersObject.setArray(zone.value()); for (String cluster : clusterList) clusterArray.addString(cluster); }); try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } }
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. 
*/ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... messages) { log(List.of(messages)); } private void log(List<String> messages) { controller.jobController().log(id, step, DEBUG, messages); } private void log(Level level, String message) { log(level, message, null); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } }
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final Duration endpointTimeout = Duration.ofMinutes(15); static final Duration installationTimeout = Duration.ofMinutes(150); private final Controller controller; private final DeploymentFailureMails mails; public InternalStepRunner(Controller controller) { this.controller = controller; this.mails = new DeploymentFailureMails(controller.zoneRegistry()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case deployTester: return deployTester(id, logger); case installReal: return installReal(id, logger); case installTester: return installTester(id, logger); case startTests: return startTests(id, logger); case endTests: return endTests(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.log(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected exception running " + id, e); if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application 
version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { return deploy(id.application(), id.type(), () -> controller.applications().deploy(id.application(), id.type().zone(controller.system()), Optional.empty(), new DeployOptions(false, Optional.empty(), false, setTheStage)), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(id.tester().id(), id.type(), () -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), new DeployOptions(true, Optional.of(platform), false, false)), logger); } private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if ( ! prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) { List<String> messages = new ArrayList<>(); messages.add("Deploy failed due to non-compatible changes that require re-feed."); messages.add("Your options are:"); messages.add("1. Revert the incompatible changes."); messages.add("2. If you think it is safe in your case, you can override this validation, see"); messages.add(" http: messages.add("3. 
Deploy as a new application under a different name."); messages.add("Illegal actions:"); prepareResponse.configChangeActions.refeedActions.stream() .filter(action -> ! action.allowed) .flatMap(action -> action.messages.stream()) .forEach(messages::add); messages.add("Details:"); prepareResponse.log.stream() .map(entry -> entry.message) .forEach(messages::add); logger.log(messages); return Optional.of(deploymentFailed); } if (prepareResponse.configChangeActions.restartActions.isEmpty()) logger.log("No services requiring restart."); else prepareResponse.configChangeActions.restartActions.stream() .flatMap(action -> action.services.stream()) .map(service -> service.hostName) .sorted().distinct() .map(Hostname::new) .forEach(hostname -> { controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname)); logger.log("Restarting services on host " + hostname.id() + "."); }); logger.log("Deployment successful."); return Optional.of(running); } catch (ConfigServerException e) { if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest() || e.getErrorCode() == ACTIVATION_CONFLICT || e.getErrorCode() == APPLICATION_LOCK_FAILURE || e.getErrorCode() == PARENT_HOST_NOT_READY) { logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage()); return Optional.empty(); } if ( e.getErrorCode() == INVALID_APPLICATION_PACKAGE || e.getErrorCode() == BAD_REQUEST) { logger.log("Deployment failed: " + e.getMessage()); return Optional.of(deploymentFailed); } throw e; } } private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! 
deployment.isPresent()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); ApplicationVersion application = setTheStage ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); logger.log("Checking installation of " + platform + " and " + application.id() + " ..."); if ( nodesConverged(id.application(), id.type(), platform, logger) && servicesConverged(id.application(), id.type(), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } if (timedOut(deployment.get(), installationTimeout)) { logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!"); return Optional.of(installationFailed); } logger.log("Installation not yet complete."); return Optional.empty(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! 
deployment.isPresent()) { logger.log(WARNING, "Deployment expired before installation of tester was successful."); return Optional.of(error); } Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Checking installation of tester container ..."); if ( nodesConverged(id.tester().id(), id.type(), platform, logger) && servicesConverged(id.tester().id(), id.type(), logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (timedOut(deployment.get(), installationTimeout)) { logger.log(WARNING, "Installation of tester failed to complete within " + installationTimeout.toMinutes() + " minutes of real deployment!"); return Optional.of(error); } logger.log("Installation of tester not yet complete."); return Optional.empty(); } private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) { List<Node> nodes = controller.configServer().nodeRepository().list(type.zone(controller.system()), id, ImmutableSet.of(active, reserved)); List<String> statuses = nodes.stream() .map(node -> String.format("%70s: %-16s%-25s%-32s%s", node.hostname(), node.serviceState(), node.wantedVersion() + (node.currentVersion().equals(node.wantedVersion()) ? "" : " <-- " + node.currentVersion()), node.restartGeneration() >= node.wantedRestartGeneration() ? "" : "restart pending (" + node.wantedRestartGeneration() + " <-- " + node.restartGeneration() + ")", node.rebootGeneration() >= node.wantedRebootGeneration() ? 
"" : "reboot pending (" + node.wantedRebootGeneration() + " <-- " + node.rebootGeneration() + ")")) .collect(Collectors.toList()); logger.log(statuses); return nodes.stream().allMatch(node -> node.currentVersion().equals(target) && node.restartGeneration() >= node.wantedRestartGeneration() && node.rebootGeneration() >= node.wantedRebootGeneration()); } private boolean servicesConverged(ApplicationId id, JobType type, DualLogger logger) { Optional<ServiceConvergence> convergence = controller.configServer().serviceConvergence(new DeploymentId(id, type.zone(controller.system()))); if ( ! convergence.isPresent()) { logger.log("Config status not currently available -- will retry."); return false; } logger.log("Wanted config generation is " + convergence.get().wantedGeneration()); List<String> statuses = convergence.get().services().stream() .filter(serviceStatus -> serviceStatus.currentGeneration() != convergence.get().wantedGeneration()) .map(serviceStatus -> String.format("%70s: %11s on port %4d has %s", serviceStatus.host().value(), serviceStatus.type(), serviceStatus.port(), serviceStatus.currentGeneration() == -1 ? "not started!" : Long.toString(serviceStatus.currentGeneration()))) .collect(Collectors.toList()); logger.log(statuses); return convergence.get().converged(); } private Optional<RunStatus> startTests(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! deployment.isPresent()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(aborted); } Set<ZoneId> zones = testedZoneAndProductionZones(id); logger.log("Attempting to find endpoints ..."); Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones); List<String> messages = new ArrayList<>(); messages.add("Found endpoints"); endpoints.forEach((zone, uris) -> { messages.add("- " + zone); uris.forEach(uri -> messages.add(" |-- " + uri)); }); logger.log(messages); if ( ! 
endpoints.containsKey(id.type().zone(controller.system()))) { if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!"); return Optional.of(error); } logger.log("Endpoints for the deployment to test are not yet ready."); return Optional.empty(); } Map<ZoneId, List<String>> clusters = listClusters(id.application(), zones); Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if (testerEndpoint.isPresent() && controller.jobController().cloud().ready(testerEndpoint.get())) { logger.log("Starting tests ..."); controller.jobController().cloud().startTests(testerEndpoint.get(), TesterCloud.Suite.of(id.type()), testConfig(id.application(), id.type().zone(controller.system()), controller.system(), endpoints, clusters)); return Optional.of(running); } if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoint for tester failed to show up within " + endpointTimeout.toMinutes() + " minutes of real deployment!"); return Optional.of(error); } logger.log("Endpoints of tester container not yet available."); return Optional.empty(); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if ( ! deployment(id.application(), id.type()).isPresent()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if ( ! 
testerEndpoint.isPresent()) { logger.log("Endpoints for tester not found -- trying again later."); return Optional.empty(); } controller.jobController().updateTestLog(id); RunStatus status; TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get()); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); status = testFailure; break; case ERROR: logger.log(INFO, "Tester failed running its tests!"); status = error; break; case SUCCESS: logger.log("Tests completed successfully."); status = running; break; default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } return Optional.of(status); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { JobReport report = JobReport.ofJob(run.id().application(), run.id().type(), run.id().number(), run.hasFailed() ? 
Optional.of(DeploymentJobs.JobError.unknown) : Optional.empty()); controller.applications().deploymentTrigger().notifyOfCompletion(report); if (run.hasFailed()) sendNotification(run, logger); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "'no longer supposed to run?:", e); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Application application(ApplicationId id) { return controller.applications().require(id); } /** Returns whether the time elapsed since the last real deployment in the given zone is more than the given timeout. */ private boolean timedOut(Deployment deployment, Duration timeout) { return deployment.at().isBefore(controller.clock().instant().minus(timeout)); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. 
*/ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); byte[] testPackage = controller.applications().applicationStore().get(id.tester(), version); byte[] servicesXml = servicesXml(controller.system()); DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); byte[] deploymentXml = deploymentXml(spec.athenzDomain(), spec.athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } /** Returns a stream containing the zone of the deployment tested in the given run, and all production zones for the application. */ private Set<ZoneId> testedZoneAndProductionZones(RunId id) { return Stream.concat(Stream.of(id.type().zone(controller.system())), application(id.application()).productionDeployments().keySet().stream()) .collect(Collectors.toSet()); } /** Returns all endpoints for all current deployments of the given real application. */ private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<URI>> deployments = ImmutableMap.builder(); for (ZoneId zone : zones) controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone)) .filter(endpoints -> ! endpoints.isEmpty()) .ifPresent(endpoints -> deployments.put(zone, endpoints)); return deployments.build(); } /** Returns all content clusters in all current deployments of the given real application. 
*/ private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<String>> clusters = ImmutableMap.builder(); for (ZoneId zone : zones) clusters.put(zone, ImmutableList.copyOf(controller.configServer().getContentClusters(new DeploymentId(id, zone)))); return clusters.build(); } /** Returns the generated services.xml content for the tester application. */ static byte[] servicesXml(SystemName systemName) { String domain = systemName == SystemName.main ? "vespa.vespa" : "vespa.vespa.cd"; String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='default'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n" + "\n" + " <http>\n" + " <server id='default' port='4080'/>\n" + " <filtering>\n" + " <access-control domain='" + domain + "'>\n" + " <exclude>\n" + " <binding>http: " </exclude>\n" + " </access-control>\n" + " <request-chain id=\"testrunner-api\">\n" + " <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" + " <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" + " <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" + " </config>\n" + " <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" + " 
<resourceName>" + domain + ":tester-application</resourceName>\n" + " <action>deploy</action>\n" + " </config>\n" + " </component>\n" + " </filter>\n" + " </request-chain>\n" + " </filtering>\n" + " </http>\n" + "\n" + " <nodes count=\"1\" flavor=\"d-1-4-50\" />\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + "/>"; return deploymentSpec.getBytes(StandardCharsets.UTF_8); } /** Returns the config for the tests to run for the given job. */ private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system, Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("application", id.serializedForm()); root.setString("zone", testerZone.value()); root.setString("system", system.name()); Cursor endpointsObject = root.setObject("endpoints"); deployments.forEach((zone, endpoints) -> { Cursor endpointArray = endpointsObject.setArray(zone.value()); for (URI endpoint : endpoints) endpointArray.addString(endpoint.toString()); }); Cursor clustersObject = root.setObject("clusters"); clusters.forEach((zone, clusterList) -> { Cursor clusterArray = clustersObject.setArray(zone.value()); for (String cluster : clusterList) clusterArray.addString(cluster); }); try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. 
*/ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... messages) { log(List.of(messages)); } private void log(List<String> messages) { controller.jobController().log(id, step, DEBUG, messages); } private void log(Level level, String message) { log(level, message, null); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } }
Consider using an explicit `Charset` here; otherwise you get whatever the platform default happens to be, which varies between systems.
/**
 * Reads the entire contents of the given file into a string.
 *
 * @param filename path of the file to read
 * @return the file contents, decoded as UTF-8
 * @throws UncheckedIOException if the file cannot be read
 */
private static String readToString(String filename) {
    try {
        // Decode explicitly as UTF-8: the single-argument String(byte[]) constructor
        // uses the platform default charset, which varies between systems.
        return new String(Files.readAllBytes(Paths.get(filename)), StandardCharsets.UTF_8);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
return new String(Files.readAllBytes(Paths.get(filename)));
/** Reads the whole file at {@code filename} and returns its contents decoded as UTF-8. */
private static String readToString(String filename) {
    var file = Paths.get(filename);
    try {
        return Files.readString(file, StandardCharsets.UTF_8);
    } catch (IOException e) {
        // Surface the failure as an unchecked exception, preserving the cause.
        throw new UncheckedIOException(e);
    }
}
/**
 * Provides Jetty {@link SslContextFactory} instances configured from certificate, private-key
 * and (optional) CA-certificate files referenced by {@link ConnectorConfig}.
 * Fails fast in the constructor if SSL is enabled but the config is incomplete.
 */
class ConfiguredSslContextFactoryProvider implements SslContextFactoryProvider {

    private final ConnectorConfig connectorConfig;

    public ConfiguredSslContextFactoryProvider(ConnectorConfig connectorConfig) {
        validateConfig(connectorConfig.ssl());
        this.connectorConfig = connectorConfig;
    }

    @Override
    public SslContextFactory getInstance(String containerId, int port) {
        ConnectorConfig.Ssl sslConfig = connectorConfig.ssl();
        if (!sslConfig.enabled()) throw new IllegalStateException();
        SslContextFactory factory = new JDiscSslContextFactory();
        // Client certificates may be required (NEED), merely requested (WANT), or neither.
        switch (sslConfig.clientAuth()) {
            case NEED_AUTH:
                factory.setNeedClientAuth(true);
                break;
            case WANT_AUTH:
                factory.setWantClientAuth(true);
                break;
        }
        factory.setKeyStore(createKeystore(sslConfig));
        factory.setKeyStorePassword(""); // in-memory keystore built below; no password needed
        if (!sslConfig.caCertificateFile().isEmpty()) {
            factory.setTrustStore(createTruststore(sslConfig));
        }
        factory.setProtocol("TLS");
        return factory;
    }

    // Validates that certificate and key files are configured whenever SSL is enabled.
    private static void validateConfig(ConnectorConfig.Ssl config) {
        if (!config.enabled()) return;
        if (config.certificateFile().isEmpty()) {
            throw new IllegalArgumentException("Missing certificate file.");
        }
        if (config.privateKeyFile().isEmpty()) {
            throw new IllegalArgumentException("Missing private key file.");
        }
    }

    // Builds a JKS truststore containing every CA certificate from the configured PEM file.
    private static KeyStore createTruststore(ConnectorConfig.Ssl sslConfig) {
        List<X509Certificate> caCertificates =
                X509CertificateUtils.certificateListFromPem(readToString(sslConfig.caCertificateFile()));
        KeyStoreBuilder truststoreBuilder = KeyStoreBuilder.withType(KeyStoreType.JKS);
        for (int i = 0; i < caCertificates.size(); i++) {
            truststoreBuilder.withCertificateEntry("entry-" + i, caCertificates.get(i));
        }
        return truststoreBuilder.build();
    }

    // Builds a JKS keystore holding the configured private key and its certificate chain.
    private static KeyStore createKeystore(ConnectorConfig.Ssl sslConfig) {
        PrivateKey privateKey = KeyUtils.fromPemEncodedPrivateKey(readToString(sslConfig.privateKeyFile()));
        List<X509Certificate> certificates =
                X509CertificateUtils.certificateListFromPem(readToString(sslConfig.certificateFile()));
        return KeyStoreBuilder.withType(KeyStoreType.JKS).withKeyEntry("default", privateKey, certificates).build();
    }
}
class ConfiguredSslContextFactoryProvider implements SslContextFactoryProvider { private final ConnectorConfig connectorConfig; public ConfiguredSslContextFactoryProvider(ConnectorConfig connectorConfig) { validateConfig(connectorConfig.ssl()); this.connectorConfig = connectorConfig; } @Override public SslContextFactory getInstance(String containerId, int port) { ConnectorConfig.Ssl sslConfig = connectorConfig.ssl(); if (!sslConfig.enabled()) throw new IllegalStateException(); SslContextFactory factory = new JDiscSslContextFactory(); switch (sslConfig.clientAuth()) { case NEED_AUTH: factory.setNeedClientAuth(true); break; case WANT_AUTH: factory.setWantClientAuth(true); break; } factory.setKeyStore(createKeystore(sslConfig)); factory.setKeyStorePassword(""); if (!sslConfig.caCertificateFile().isEmpty()) { factory.setTrustStore(createTruststore(sslConfig)); } factory.setProtocol("TLS"); return factory; } private static void validateConfig(ConnectorConfig.Ssl config) { if (!config.enabled()) return; if (config.certificateFile().isEmpty()) { throw new IllegalArgumentException("Missing certificate file."); } if (config.privateKeyFile().isEmpty()) { throw new IllegalArgumentException("Missing private key file."); } } private static KeyStore createTruststore(ConnectorConfig.Ssl sslConfig) { List<X509Certificate> caCertificates = X509CertificateUtils.certificateListFromPem(readToString(sslConfig.caCertificateFile())); KeyStoreBuilder truststoreBuilder = KeyStoreBuilder.withType(KeyStoreType.JKS); for (int i = 0; i < caCertificates.size(); i++) { truststoreBuilder.withCertificateEntry("entry-" + i, caCertificates.get(i)); } return truststoreBuilder.build(); } private static KeyStore createKeystore(ConnectorConfig.Ssl sslConfig) { PrivateKey privateKey = KeyUtils.fromPemEncodedPrivateKey(readToString(sslConfig.privateKeyFile())); List<X509Certificate> certificates = X509CertificateUtils.certificateListFromPem(readToString(sslConfig.certificateFile())); return 
KeyStoreBuilder.withType(KeyStoreType.JKS).withKeyEntry("default", privateKey, certificates).build(); } }
This is hard to use: a raw two-element {bucket, prefix} array forces every caller to remember which index means what — consider a small value type instead.
/**
 * Splits {@code Config.aws_s3_path} at its first '/' into a bucket name and a key prefix.
 * A path without any '/' yields the whole value as the bucket and an empty prefix.
 *
 * @return a two-element array: {bucket, prefix}
 */
public static String[] getBucketAndPrefix() {
    String path = Config.aws_s3_path;
    int slash = path.indexOf('/');
    return slash < 0
            ? new String[] {path, ""}
            : new String[] {path.substring(0, slash), path.substring(slash + 1)};
}
return new String[] {Config.aws_s3_path.substring(0, index),
/**
 * Splits {@code Config.aws_s3_path} at its first '/' into {bucket, prefix}.
 * A path without any '/' yields the whole value as the bucket and an empty prefix.
 *
 * @return a two-element array: {bucket, prefix}
 */
public static String[] getBucketAndPrefix() {
    int index = Config.aws_s3_path.indexOf('/');
    if (index < 0) {
        // No separator: the whole configured value is the bucket name.
        return new String[] {Config.aws_s3_path, ""};
    }
    return new String[] {Config.aws_s3_path.substring(0, index),
                         Config.aws_s3_path.substring(index + 1)};
}
/**
 * Initialization-on-demand holder for the {@link StarMgrServer} singleton: the JVM
 * initializes {@code INSTANCE} only when this class is first referenced, and class
 * initialization makes the construction thread-safe without explicit locking.
 */
class SingletonHolder {
    private static final StarMgrServer INSTANCE = new StarMgrServer();
}
/**
 * Holder class for the lazily-created {@link StarMgrServer} singleton; JVM class
 * initialization guarantees {@code INSTANCE} is built exactly once, thread-safely.
 */
class SingletonHolder {
    private static final StarMgrServer INSTANCE = new StarMgrServer();
}
Consider not using exception handling for normal control flow; an explicit end-of-queue sentinel value or a `hasNext`-style check would make the loop's termination condition clearer.
// Drains the work queue, executing each dequeued Runnable in order.
// The loop has no normal exit: EndOfQueueException — presumably thrown by
// workQueue.dequeue() when the queue is shut down (TODO confirm) — is the
// expected termination signal and is deliberately swallowed here.
private void run() {
    try {
        while (true) {
            ((Runnable) workQueue.dequeue()).run();
        }
    } catch (EndOfQueueException e) {
        // normal termination of the worker loop
    }
}
} catch (EndOfQueueException e) {}
/** Executes queued work items in order until the queue signals end-of-queue. */
private void run() {
    try {
        for (;;) {
            Runnable work = (Runnable) workQueue.dequeue();
            work.run();
        }
    } catch (EndOfQueueException ignored) {
        // expected: this is the worker loop's termination signal
    }
}
/**
 * Work item that runs a {@link Connection}'s handshake work and then notifies the
 * connection's transport that the work is done.
 */
class DoHandshakeWork implements Runnable {

    private Connection connection;

    DoHandshakeWork(Connection c) {
        connection = c;
    }

    public void run() {
        connection.doHandshakeWork();
        // Hand the connection back to its transport once the handshake work completes.
        connection.transport().handshakeWorkDone(connection);
    }
}
/** Runnable that executes handshake work for a connection, then reports completion to its transport. */
class DoHandshakeWork implements Runnable {

    private Connection connection;

    DoHandshakeWork(Connection connection) {
        this.connection = connection;
    }

    @Override
    public void run() {
        Connection c = connection;
        c.doHandshakeWork();
        c.transport().handshakeWorkDone(c);
    }
}
It seems to me that some callers of this method do not handle the exception being thrown, e.g., ApplicationRepository.lastDeployTime().
/**
 * Returns the id of the active session of the given application.
 *
 * @throws IllegalArgumentException if the application is unknown (from activeSessionOf)
 *                                  or has no active session
 */
public long requireActiveSessionOf(ApplicationId applicationId) {
    return activeSessionOf(applicationId)
            .orElseThrow(() -> new IllegalArgumentException("Application '" + applicationId + "' has no active session."));
}
.orElseThrow(() -> new IllegalArgumentException("Application '" + applicationId + "' has no active session."));
/** Returns the id of the active session of the given application; throws if there is none. */
public long requireActiveSessionOf(ApplicationId applicationId) {
    OptionalLong session = activeSessionOf(applicationId);
    if (!session.isPresent()) {
        throw new IllegalArgumentException("Application '" + applicationId + "' has no active session.");
    }
    return session.getAsLong();
}
/**
 * Tracks the active applications of a tenant in ZooKeeper (via {@link Curator}) and
 * notifies the {@link ReloadHandler} when applications are added or removed.
 */
class TenantApplications {

    private static final Logger log = Logger.getLogger(TenantApplications.class.getName());

    private final Curator curator;
    private final Path applicationsPath;
    // Shared daemon-thread pool used by the directory cache of every TenantApplications instance.
    private static final ExecutorService pathChildrenExecutor =
            Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory(TenantApplications.class.getName()));
    private final Curator.DirectoryCache directoryCache;
    private final ReloadHandler reloadHandler;
    private final TenantName tenant;

    private TenantApplications(Curator curator, Path applicationsPath, ReloadHandler reloadHandler, TenantName tenant) {
        this.curator = curator;
        this.applicationsPath = applicationsPath;
        curator.create(applicationsPath); // ensure the parent node exists before watching it
        this.reloadHandler = reloadHandler;
        this.tenant = tenant;
        this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, pathChildrenExecutor);
        this.directoryCache.start();
        this.directoryCache.addListener(this::childEvent);
    }

    public static TenantApplications create(Curator curator, ReloadHandler reloadHandler, TenantName tenant) {
        try {
            return new TenantApplications(curator, TenantRepository.getApplicationsPath(tenant), reloadHandler, tenant);
        } catch (Exception e) {
            throw new RuntimeException(TenantRepository.logPre(tenant) + "Error creating application repo", e);
        }
    }

    /**
     * List the active applications of a tenant in this config server.
     *
     * @return a list of {@link ApplicationId}s that are active.
     */
    public List<ApplicationId> activeApplications() {
        return curator.getChildren(applicationsPath).stream()
                      .filter(this::isValid)
                      .map(ApplicationId::fromSerializedForm)
                      .filter(id -> activeSessionOf(id).isPresent())
                      .collect(Collectors.toUnmodifiableList());
    }

    // Returns whether the child node name parses as an application id; deletes stray nodes that don't.
    private boolean isValid(String appNode) {
        try {
            ApplicationId.fromSerializedForm(appNode);
            return true;
        } catch (IllegalArgumentException __) {
            log.log(LogLevel.INFO, TenantRepository.logPre(tenant) + "Unable to parse application id from '" + appNode + "'; deleting it as it shouldn't be here.");
            try {
                curator.delete(applicationsPath.append(appNode));
            } catch (Exception e) {
                // Best effort: log the failure, but still report the node as invalid.
                log.log(LogLevel.WARNING, TenantRepository.logPre(tenant) + "Failed to clean up stray node '" + appNode + "'!", e);
            }
            return false;
        }
    }

    /** Returns the id of the currently active session for the given application, if any. Throws on unknown applications. */
    public OptionalLong activeSessionOf(ApplicationId id) {
        String data = curator.getData(applicationPath(id)).map(Utf8::toString)
                             .orElseThrow(() -> new IllegalArgumentException("Unknown application '" + id + "'."));
        // An empty node means the application exists but has no active session yet.
        return data.isEmpty() ? OptionalLong.empty() : OptionalLong.of(Long.parseLong(data));
    }

    /**
     * Returns a transaction which writes the given session id as the currently active for the given application.
     *
     * @param applicationId An {@link ApplicationId} that represents an active application.
     * @param sessionId Id of the session containing the application package for this id.
     */
    public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) {
        if (curator.exists(applicationPath(applicationId))) {
            return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationPath(applicationId).getAbsolute(), Utf8.toAsciiBytes(sessionId)));
        } else {
            return new CuratorTransaction(curator).add(CuratorOperations.create(applicationPath(applicationId).getAbsolute(), Utf8.toAsciiBytes(sessionId)));
        }
    }

    /**
     * Returns a transaction which deletes this application.
     */
    public CuratorTransaction createDeleteTransaction(ApplicationId applicationId) {
        return CuratorTransaction.from(CuratorOperations.delete(applicationPath(applicationId).getAbsolute()), curator);
    }

    /**
     * Removes all applications not known to this from the config server state.
     */
    public void removeUnusedApplications() {
        reloadHandler.removeApplicationsExcept(Set.copyOf(activeApplications()));
    }

    /**
     * Closes the application repo. Once a repo has been closed, it should not be used again.
     */
    public void close() {
        directoryCache.close();
    }

    // Reacts to ZooKeeper child events under the applications path.
    private void childEvent(CuratorFramework client, PathChildrenCacheEvent event) {
        switch (event.getType()) {
            case CHILD_ADDED:
                applicationAdded(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName()));
                break;
            case CHILD_REMOVED:
                applicationRemoved(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName()));
                break;
            case CHILD_UPDATED:
                break;
            default:
                break;
        }
        removeUnusedApplications();
    }

    private void applicationRemoved(ApplicationId applicationId) {
        reloadHandler.removeApplication(applicationId);
        log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Application removed: " + applicationId);
    }

    private void applicationAdded(ApplicationId applicationId) {
        log.log(LogLevel.DEBUG, TenantRepository.logPre(applicationId) + "Application added: " + applicationId);
    }

    private Path applicationPath(ApplicationId id) {
        return applicationsPath.append(id.serializedForm());
    }
}
/**
 * Tracks the active applications of a tenant in ZooKeeper (via {@link Curator}) and
 * notifies the {@link ReloadHandler} when applications are added or removed.
 * Also provides per-application locks for changing session status.
 */
class TenantApplications {

    private static final Logger log = Logger.getLogger(TenantApplications.class.getName());

    private final Curator curator;
    private final Path applicationsPath;
    private final Path locksPath;
    // Shared daemon-thread pool used by the directory cache of every TenantApplications instance.
    private static final ExecutorService pathChildrenExecutor =
            Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory(TenantApplications.class.getName()));
    private final Curator.DirectoryCache directoryCache;
    private final ReloadHandler reloadHandler;
    private final TenantName tenant;
    // One Lock object per application, created lazily; see lock(ApplicationId).
    private final Map<ApplicationId, Lock> locks;

    private TenantApplications(Curator curator, ReloadHandler reloadHandler, TenantName tenant) {
        this.curator = curator;
        this.applicationsPath = TenantRepository.getApplicationsPath(tenant);
        this.locksPath = TenantRepository.getLocksPath(tenant);
        this.locks = new ConcurrentHashMap<>(2);
        this.reloadHandler = reloadHandler;
        this.tenant = tenant;
        this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, pathChildrenExecutor);
        this.directoryCache.start();
        this.directoryCache.addListener(this::childEvent);
    }

    public static TenantApplications create(Curator curator, ReloadHandler reloadHandler, TenantName tenant) {
        return new TenantApplications(curator, reloadHandler, tenant);
    }

    /**
     * List the active applications of a tenant in this config server.
     *
     * @return a list of {@link ApplicationId}s that are active.
     */
    public List<ApplicationId> activeApplications() {
        return curator.getChildren(applicationsPath).stream()
                      .filter(this::isValid)
                      .sorted()
                      .map(ApplicationId::fromSerializedForm)
                      .filter(id -> activeSessionOf(id).isPresent())
                      .collect(Collectors.toUnmodifiableList());
    }

    // Returns whether the child node name parses as an application id; deletes stray nodes that don't.
    private boolean isValid(String appNode) {
        try {
            ApplicationId.fromSerializedForm(appNode);
            return true;
        } catch (IllegalArgumentException __) {
            log.log(LogLevel.INFO, TenantRepository.logPre(tenant) + "Unable to parse application id from '" + appNode + "'; deleting it as it shouldn't be here.");
            try {
                curator.delete(applicationsPath.append(appNode));
            } catch (RuntimeException e) {
                // Best effort: log the failure, but still report the node as invalid.
                log.log(LogLevel.WARNING, TenantRepository.logPre(tenant) + "Failed to clean up stray node '" + appNode + "'!", e);
            }
            return false;
        }
    }

    /** Returns whether a node exists for the given application. */
    public boolean exists(ApplicationId id) {
        return curator.exists(applicationPath(id));
    }

    /** Returns the id of the currently active session for the given application, if any. Throws on unknown applications. */
    public OptionalLong activeSessionOf(ApplicationId id) {
        String data = curator.getData(applicationPath(id)).map(Utf8::toString)
                             .orElseThrow(() -> new IllegalArgumentException("Unknown application '" + id + "'."));
        // An empty node means the application exists but has no active session yet.
        return data.isEmpty() ? OptionalLong.empty() : OptionalLong.of(Long.parseLong(data));
    }

    /**
     * Returns a transaction which writes the given session id as the currently active for the given application.
     *
     * NOTE(review): unlike the previous version, this always uses setData — it assumes the
     * application node already exists (see createApplication); verify all callers create it first.
     *
     * @param applicationId An {@link ApplicationId} that represents an active application.
     * @param sessionId Id of the session containing the application package for this id.
     */
    public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) {
        return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationPath(applicationId).getAbsolute(), Utf8.toAsciiBytes(sessionId)));
    }

    /**
     * Creates a node for the given application, marking its existence.
     */
    public void createApplication(ApplicationId id) {
        try (Lock lock = lock(id)) {
            curator.create(applicationPath(id));
        }
    }

    /**
     * Returns a transaction which deletes this application.
     */
    public CuratorTransaction createDeleteTransaction(ApplicationId applicationId) {
        return CuratorTransaction.from(CuratorOperations.deleteAll(applicationPath(applicationId).getAbsolute(), curator), curator);
    }

    /**
     * Removes all applications not known to this from the config server state.
     */
    public void removeUnusedApplications() {
        reloadHandler.removeApplicationsExcept(Set.copyOf(activeApplications()));
    }

    /**
     * Closes the application repo. Once a repo has been closed, it should not be used again.
     */
    public void close() {
        directoryCache.close();
    }

    /**
     * Returns the lock for changing the session status of the given application.
     */
    public Lock lock(ApplicationId id) {
        curator.create(lockPath(id)); // ensure the lock node exists before locking on it
        Lock lock = locks.computeIfAbsent(id, __ -> new Lock(lockPath(id).getAbsolute(), curator));
        lock.acquire(Duration.ofMinutes(1)); // blocks; presumably throws on timeout — TODO confirm Lock.acquire semantics
        return lock;
    }

    // Reacts to ZooKeeper child events under the applications path.
    private void childEvent(CuratorFramework client, PathChildrenCacheEvent event) {
        switch (event.getType()) {
            case CHILD_ADDED:
                applicationAdded(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName()));
                break;
            case CHILD_REMOVED:
                applicationRemoved(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName()));
                break;
            case CHILD_UPDATED:
                break;
            default:
                break;
        }
        removeUnusedApplications();
    }

    private void applicationRemoved(ApplicationId applicationId) {
        reloadHandler.removeApplication(applicationId);
        log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Application removed: " + applicationId);
    }

    private void applicationAdded(ApplicationId applicationId) {
        log.log(LogLevel.DEBUG, TenantRepository.logPre(applicationId) + "Application added: " + applicationId);
    }

    private Path applicationPath(ApplicationId id) {
        return applicationsPath.append(id.serializedForm());
    }

    private Path lockPath(ApplicationId id) {
        return locksPath.append(id.serializedForm());
    }
}
This threw before as well, when the application had no active session. I'll check whether that may have been unfortunate for any of the callers.
/**
 * Returns the id of the active session of the given application.
 *
 * @throws IllegalArgumentException if the application is unknown (from activeSessionOf)
 *                                  or has no active session
 */
public long requireActiveSessionOf(ApplicationId applicationId) {
    return activeSessionOf(applicationId)
            .orElseThrow(() -> new IllegalArgumentException("Application '" + applicationId + "' has no active session."));
}
.orElseThrow(() -> new IllegalArgumentException("Application '" + applicationId + "' has no active session."));
/**
 * Returns the active session id of the given application, failing if there is none.
 *
 * @throws IllegalArgumentException if the application has no active session
 */
public long requireActiveSessionOf(ApplicationId applicationId) {
    return activeSessionOf(applicationId)
            .orElseThrow(() -> new IllegalArgumentException("Application '" + applicationId + "' has no active session."));
}
/**
 * Tracks the active applications of a tenant in ZooKeeper (via {@link Curator}) and
 * notifies the {@link ReloadHandler} when applications are added or removed.
 */
class TenantApplications {

    private static final Logger log = Logger.getLogger(TenantApplications.class.getName());

    private final Curator curator;
    private final Path applicationsPath;
    // Shared daemon-thread pool used by the directory cache of every TenantApplications instance.
    private static final ExecutorService pathChildrenExecutor =
            Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory(TenantApplications.class.getName()));
    private final Curator.DirectoryCache directoryCache;
    private final ReloadHandler reloadHandler;
    private final TenantName tenant;

    private TenantApplications(Curator curator, Path applicationsPath, ReloadHandler reloadHandler, TenantName tenant) {
        this.curator = curator;
        this.applicationsPath = applicationsPath;
        curator.create(applicationsPath); // ensure the parent node exists before watching it
        this.reloadHandler = reloadHandler;
        this.tenant = tenant;
        this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, pathChildrenExecutor);
        this.directoryCache.start();
        this.directoryCache.addListener(this::childEvent);
    }

    public static TenantApplications create(Curator curator, ReloadHandler reloadHandler, TenantName tenant) {
        try {
            return new TenantApplications(curator, TenantRepository.getApplicationsPath(tenant), reloadHandler, tenant);
        } catch (Exception e) {
            throw new RuntimeException(TenantRepository.logPre(tenant) + "Error creating application repo", e);
        }
    }

    /**
     * List the active applications of a tenant in this config server.
     *
     * @return a list of {@link ApplicationId}s that are active.
     */
    public List<ApplicationId> activeApplications() {
        return curator.getChildren(applicationsPath).stream()
                      .filter(this::isValid)
                      .map(ApplicationId::fromSerializedForm)
                      .filter(id -> activeSessionOf(id).isPresent())
                      .collect(Collectors.toUnmodifiableList());
    }

    // Returns whether the child node name parses as an application id; deletes stray nodes that don't.
    private boolean isValid(String appNode) {
        try {
            ApplicationId.fromSerializedForm(appNode);
            return true;
        } catch (IllegalArgumentException __) {
            log.log(LogLevel.INFO, TenantRepository.logPre(tenant) + "Unable to parse application id from '" + appNode + "'; deleting it as it shouldn't be here.");
            try {
                curator.delete(applicationsPath.append(appNode));
            } catch (Exception e) {
                // Best effort: log the failure, but still report the node as invalid.
                log.log(LogLevel.WARNING, TenantRepository.logPre(tenant) + "Failed to clean up stray node '" + appNode + "'!", e);
            }
            return false;
        }
    }

    /** Returns the id of the currently active session for the given application, if any. Throws on unknown applications. */
    public OptionalLong activeSessionOf(ApplicationId id) {
        String data = curator.getData(applicationPath(id)).map(Utf8::toString)
                             .orElseThrow(() -> new IllegalArgumentException("Unknown application '" + id + "'."));
        // An empty node means the application exists but has no active session yet.
        return data.isEmpty() ? OptionalLong.empty() : OptionalLong.of(Long.parseLong(data));
    }

    /**
     * Returns a transaction which writes the given session id as the currently active for the given application.
     *
     * @param applicationId An {@link ApplicationId} that represents an active application.
     * @param sessionId Id of the session containing the application package for this id.
     */
    public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) {
        if (curator.exists(applicationPath(applicationId))) {
            return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationPath(applicationId).getAbsolute(), Utf8.toAsciiBytes(sessionId)));
        } else {
            return new CuratorTransaction(curator).add(CuratorOperations.create(applicationPath(applicationId).getAbsolute(), Utf8.toAsciiBytes(sessionId)));
        }
    }

    /**
     * Returns a transaction which deletes this application.
     */
    public CuratorTransaction createDeleteTransaction(ApplicationId applicationId) {
        return CuratorTransaction.from(CuratorOperations.delete(applicationPath(applicationId).getAbsolute()), curator);
    }

    /**
     * Removes all applications not known to this from the config server state.
     */
    public void removeUnusedApplications() {
        reloadHandler.removeApplicationsExcept(Set.copyOf(activeApplications()));
    }

    /**
     * Closes the application repo. Once a repo has been closed, it should not be used again.
     */
    public void close() {
        directoryCache.close();
    }

    // Reacts to ZooKeeper child events under the applications path.
    private void childEvent(CuratorFramework client, PathChildrenCacheEvent event) {
        switch (event.getType()) {
            case CHILD_ADDED:
                applicationAdded(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName()));
                break;
            case CHILD_REMOVED:
                applicationRemoved(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName()));
                break;
            case CHILD_UPDATED:
                break;
            default:
                break;
        }
        removeUnusedApplications();
    }

    private void applicationRemoved(ApplicationId applicationId) {
        reloadHandler.removeApplication(applicationId);
        log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Application removed: " + applicationId);
    }

    private void applicationAdded(ApplicationId applicationId) {
        log.log(LogLevel.DEBUG, TenantRepository.logPre(applicationId) + "Application added: " + applicationId);
    }

    private Path applicationPath(ApplicationId id) {
        return applicationsPath.append(id.serializedForm());
    }
}
/**
 * The applications of a tenant, backed by ZooKeeper. Each application is a node under the
 * tenant's applications path; the node's data is the currently active session id (if any).
 * A directory cache watches the applications path so remote changes are mirrored locally.
 */
class TenantApplications {

    private static final Logger log = Logger.getLogger(TenantApplications.class.getName());

    private final Curator curator;
    private final Path applicationsPath;
    private final Path locksPath;
    // Shared daemon-thread executor for all directory-cache callbacks of this class.
    private static final ExecutorService pathChildrenExecutor =
            Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory(TenantApplications.class.getName()));
    private final Curator.DirectoryCache directoryCache;
    private final ReloadHandler reloadHandler;
    private final TenantName tenant;
    // Per-application locks, created lazily in lock(). Keys are application ids.
    private final Map<ApplicationId, Lock> locks;

    private TenantApplications(Curator curator, ReloadHandler reloadHandler, TenantName tenant) {
        this.curator = curator;
        this.applicationsPath = TenantRepository.getApplicationsPath(tenant);
        this.locksPath = TenantRepository.getLocksPath(tenant);
        this.locks = new ConcurrentHashMap<>(2);
        this.reloadHandler = reloadHandler;
        this.tenant = tenant;
        // Watch the applications node so this config server reacts to changes made by others.
        this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, pathChildrenExecutor);
        this.directoryCache.start();
        this.directoryCache.addListener(this::childEvent);
    }

    public static TenantApplications create(Curator curator, ReloadHandler reloadHandler, TenantName tenant) {
        return new TenantApplications(curator, reloadHandler, tenant);
    }

    /**
     * List the active applications of a tenant in this config server.
     *
     * @return a list of {@link ApplicationId}s that are active.
     */
    public List<ApplicationId> activeApplications() {
        return curator.getChildren(applicationsPath).stream()
                      .filter(this::isValid)
                      .sorted() // sorts the serialized-form strings before mapping to ids
                      .map(ApplicationId::fromSerializedForm)
                      .filter(id -> activeSessionOf(id).isPresent())
                      .collect(Collectors.toUnmodifiableList());
    }

    /**
     * Returns true if the node name parses as an application id.
     * Otherwise best-effort deletes the stray node and returns false.
     */
    private boolean isValid(String appNode) {
        try {
            ApplicationId.fromSerializedForm(appNode);
            return true;
        } catch (IllegalArgumentException __) {
            log.log(LogLevel.INFO, TenantRepository.logPre(tenant) + "Unable to parse application id from '" +
                                   appNode + "'; deleting it as it shouldn't be here.");
            try {
                curator.delete(applicationsPath.append(appNode));
            } catch (RuntimeException e) {
                // Cleanup is best-effort; the node is still reported invalid below.
                log.log(LogLevel.WARNING, TenantRepository.logPre(tenant) + "Failed to clean up stray node '" +
                                          appNode + "'!", e);
            }
            return false;
        }
    }

    /** Returns whether a node exists for the given application. */
    public boolean exists(ApplicationId id) {
        return curator.exists(applicationPath(id));
    }

    /** Returns the id of the currently active session for the given application, if any. Throws on unknown applications. */
    public OptionalLong activeSessionOf(ApplicationId id) {
        String data = curator.getData(applicationPath(id)).map(Utf8::toString)
                             .orElseThrow(() -> new IllegalArgumentException("Unknown application '" + id + "'."));
        // An empty node means the application exists but has no active session yet.
        return data.isEmpty() ? OptionalLong.empty() : OptionalLong.of(Long.parseLong(data));
    }

    /**
     * Returns a transaction which writes the given session id as the currently active for the given application.
     *
     * @param applicationId An {@link ApplicationId} that represents an active application.
     * @param sessionId Id of the session containing the application package for this id.
     */
    public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) {
        return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationPath(applicationId).getAbsolute(), Utf8.toAsciiBytes(sessionId)));
    }

    /**
     * Creates a node for the given application, marking its existence.
     */
    public void createApplication(ApplicationId id) {
        try (Lock lock = lock(id)) {
            curator.create(applicationPath(id));
        }
    }

    /**
     * Returns a transaction which deletes this application and everything below its node.
     */
    public CuratorTransaction createDeleteTransaction(ApplicationId applicationId) {
        return CuratorTransaction.from(CuratorOperations.deleteAll(applicationPath(applicationId).getAbsolute(), curator), curator);
    }

    /**
     * Removes all applications not known to this from the config server state.
     */
    public void removeUnusedApplications() {
        reloadHandler.removeApplicationsExcept(Set.copyOf(activeApplications()));
    }

    /**
     * Closes the application repo. Once a repo has been closed, it should not be used again.
     */
    public void close() {
        directoryCache.close();
    }

    /** Returns the lock for changing the session status of the given application. */
    public Lock lock(ApplicationId id) {
        curator.create(lockPath(id)); // ensure the lock node exists before locking on it
        Lock lock = locks.computeIfAbsent(id, __ -> new Lock(lockPath(id).getAbsolute(), curator));
        lock.acquire(Duration.ofMinutes(1)); // timeout to avoid deadlocking forever on a stuck holder
        return lock;
    }

    // Callback for the ZooKeeper directory cache: mirrors remote add/remove events locally.
    private void childEvent(CuratorFramework client, PathChildrenCacheEvent event) {
        switch (event.getType()) {
            case CHILD_ADDED:
                applicationAdded(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName()));
                break;
            case CHILD_REMOVED:
                applicationRemoved(ApplicationId.fromSerializedForm(Path.fromString(event.getData().getPath()).getName()));
                break;
            case CHILD_UPDATED:
                break;
            default:
                break;
        }
        // Re-sync after every event so local state matches what is now in ZooKeeper.
        removeUnusedApplications();
    }

    private void applicationRemoved(ApplicationId applicationId) {
        reloadHandler.removeApplication(applicationId);
        log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Application removed: " + applicationId);
    }

    private void applicationAdded(ApplicationId applicationId) {
        log.log(LogLevel.DEBUG, TenantRepository.logPre(applicationId) + "Application added: " + applicationId);
    }

    private Path applicationPath(ApplicationId id) {
        return applicationsPath.append(id.serializedForm());
    }

    private Path lockPath(ApplicationId id) {
        return locksPath.append(id.serializedForm());
    }

}
Consider committing to the exact property map name: change `startsWith("trace")` to `equals("trace")`.
/**
 * Extracts backend trace information from the result packet's property maps and adds it
 * to the query's trace at the query's current trace level.
 * Only entries keyed "slime" in the property map named exactly "trace" are considered;
 * each is decoded from Slime binary format and rendered as JSON into the trace message.
 */
private void addBackendTrace(Query query, QueryResultPacket resultPacket) {
    if (resultPacket.propsArray == null) return; // backend attached no property maps
    for (FS4Properties properties : resultPacket.propsArray) {
        // Fix: commit to the exact property map name — startsWith("trace") would also
        // match unrelated maps whose names merely begin with "trace".
        if ( ! properties.getName().equals("trace")) continue;
        for (FS4Properties.Entry entry : properties.getEntries()) {
            if ( ! entry.key.equals("slime")) continue;
            Slime trace = BinaryFormat.decode(entry.getValue());
            query.trace("Backend trace :" + entry.key + " => " + Utf8.toString(JsonFormat.toJsonBytes(trace)), query.getTraceLevel());
        }
    }
}
if ( ! properties.getName().startsWith("trace")) continue;
/**
 * Collects all backend trace entries from the "trace" property map of the result packet
 * into a single Slime array and attaches it to the query trace at the current trace level.
 */
private void addBackendTrace(Query query, QueryResultPacket resultPacket) {
    if (resultPacket.propsArray == null) return; // no property maps from the backend

    Value.ArrayValue collectedTraces = new Value.ArrayValue();
    for (FS4Properties propertyMap : resultPacket.propsArray) {
        if (propertyMap.getName().equals("trace")) {
            for (FS4Properties.Entry traceEntry : propertyMap.getEntries())
                collectedTraces.add(new SlimeAdapter(BinaryFormat.decode(traceEntry.getValue()).get()));
        }
    }
    query.trace(collectedTraces, query.getTraceLevel());
}
Class-level change — remove this block: List<Result> parts= partitionHits(result, summaryClass); if (parts.size() > 0) { for (Result r : parts) { doPartialFill(r, summaryClass); mergeErrorsInto(result, r); } result.hits().setSorted(false); result.analyzeHits(); }
class - remove List<Result> parts= partitionHits(result, summaryClass); if (parts.size() > 0) { for (Result r : parts) { doPartialFill(r, summaryClass); mergeErrorsInto(result, r); } result.hits().setSorted(false); result.analyzeHits(); }
'trace' it is.
/**
 * Adds backend trace information from the result packet to the query trace.
 * Looks only at the property map named exactly "trace", and within it only at entries
 * keyed "slime"; each is decoded from Slime binary format and traced as JSON.
 */
private void addBackendTrace(Query query, QueryResultPacket resultPacket) {
    if (resultPacket.propsArray == null) return; // nothing traced by the backend
    for (FS4Properties properties : resultPacket.propsArray) {
        // Fix: match the property map name exactly ("trace"), not as a prefix,
        // so maps named e.g. "traceXyz" are not picked up by accident.
        if ( ! properties.getName().equals("trace")) continue;
        for (FS4Properties.Entry entry : properties.getEntries()) {
            if ( ! entry.key.equals("slime")) continue;
            Slime trace = BinaryFormat.decode(entry.getValue());
            query.trace("Backend trace :" + entry.key + " => " + Utf8.toString(JsonFormat.toJsonBytes(trace)), query.getTraceLevel());
        }
    }
}
if ( ! properties.getName().startsWith("trace")) continue;
/**
 * Gathers every entry of the backend's "trace" property map into one Slime array
 * and records it on the query trace at the query's trace level.
 */
private void addBackendTrace(Query query, QueryResultPacket resultPacket) {
    // Bail out early when the backend attached no property maps at all.
    if (resultPacket.propsArray == null) return;

    Value.ArrayValue backendTraces = new Value.ArrayValue();
    for (FS4Properties propertySet : resultPacket.propsArray) {
        boolean isTraceMap = propertySet.getName().equals("trace");
        if ( ! isTraceMap) continue;
        for (FS4Properties.Entry slimeEntry : propertySet.getEntries())
            backendTraces.add(new SlimeAdapter(BinaryFormat.decode(slimeEntry.getValue()).get()));
    }
    query.trace(backendTraces, query.getTraceLevel());
}
class - remove List<Result> parts= partitionHits(result, summaryClass); if (parts.size() > 0) { for (Result r : parts) { doPartialFill(r, summaryClass); mergeErrorsInto(result, r); } result.hits().setSorted(false); result.analyzeHits(); }
class - remove List<Result> parts= partitionHits(result, summaryClass); if (parts.size() > 0) { for (Result r : parts) { doPartialFill(r, summaryClass); mergeErrorsInto(result, r); } result.hits().setSorted(false); result.analyzeHits(); }
FYI: @hakonhall
/**
 * Resolves the HTTP URI to fetch logs from for the given application.
 * If a hostname is given, that host is validated against the application's hosts and used;
 * otherwise the host running the "logserver" service is located, its "container" service
 * found, and the port tagged "http" selected.
 *
 * @throws IllegalArgumentException if the host does not belong to the application, or no
 *         logserver host / container service / http port can be found
 */
private String getLogServerURI(ApplicationId applicationId, Optional<String> hostname) {
    Application application = getApplication(applicationId);
    Collection<HostInfo> hostInfos = application.getModel().getHosts();
    if (hostname.isPresent()) {
        HostInfo logServerHostInfo = hostInfos.stream()
                .filter(host -> host.getHostname().equalsIgnoreCase(hostname.get()))
                .findFirst().orElseThrow(() -> new IllegalArgumentException("Host " + hostname.get() + " does not belong to " + applicationId));
        // NOTE(review): the remainder of this return statement was truncated in this copy of
        // the source (an "http://…" URL lost everything from "//" onward) — restore from upstream.
        return "http:
    }
    HostInfo logServerHostInfo = hostInfos.stream()
            .filter(host -> host.getServices().stream()
                    .anyMatch(serviceInfo -> serviceInfo.getServiceType().equalsIgnoreCase("logserver")))
            .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HostInfo for LogServer"));
    ServiceInfo containerServiceInfo = logServerHostInfo.getServices().stream()
            .filter(service -> service.getServiceType().equals("container"))
            .findFirst().orElseThrow(() -> new IllegalArgumentException("No container running on logserver host"));
    int port = containerServiceInfo.getPorts().stream()
            .filter(portInfo -> portInfo.getTags().stream().anyMatch(tag -> tag.equalsIgnoreCase("http")))
            .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HTTP port"))
            .getPort();
    // NOTE(review): truncated URL literal, as above — restore from upstream.
    return "http:
}
/**
 * Resolves the HTTP URI to fetch logs from for the given application.
 * With an explicit hostname, that host is validated against the application's hosts;
 * otherwise the "logserver" host, its "container" service and its "http"-tagged port
 * are discovered from the application model.
 *
 * @throws IllegalArgumentException if the host does not belong to the application, or no
 *         logserver host / container service / http port can be found
 */
private String getLogServerURI(ApplicationId applicationId, Optional<String> hostname) {
    Application application = getApplication(applicationId);
    Collection<HostInfo> hostInfos = application.getModel().getHosts();
    if (hostname.isPresent()) {
        HostInfo logServerHostInfo = hostInfos.stream()
                .filter(host -> host.getHostname().equalsIgnoreCase(hostname.get()))
                .findFirst().orElseThrow(() -> new IllegalArgumentException("Host " + hostname.get() + " does not belong to " + applicationId));
        // NOTE(review): the remainder of this return statement was truncated in this copy of
        // the source (an "http://…" URL lost everything from "//" onward) — restore from upstream.
        return "http:
    }
    HostInfo logServerHostInfo = hostInfos.stream()
            .filter(host -> host.getServices().stream()
                    .anyMatch(serviceInfo -> serviceInfo.getServiceType().equalsIgnoreCase("logserver")))
            .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HostInfo for LogServer"));
    ServiceInfo containerServiceInfo = logServerHostInfo.getServices().stream()
            .filter(service -> service.getServiceType().equals("container"))
            .findFirst().orElseThrow(() -> new IllegalArgumentException("No container running on logserver host"));
    int port = containerServiceInfo.getPorts().stream()
            .filter(portInfo -> portInfo.getTags().stream().anyMatch(tag -> tag.equalsIgnoreCase("http")))
            .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HTTP port"))
            .getPort();
    // NOTE(review): truncated URL literal, as above — restore from upstream.
    return "http:
}
/**
 * The config server's API for managing applications: deploy, prepare, activate, delete,
 * session management, convergence checks and log retrieval. Also implements
 * {@link com.yahoo.config.provision.Deployer} for system-internal redeployments.
 */
class ApplicationRepository implements com.yahoo.config.provision.Deployer {

    private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName());

    private final TenantRepository tenantRepository;
    private final Optional<Provisioner> hostProvisioner;
    private final ConfigConvergenceChecker convergeChecker;
    private final HttpProxy httpProxy;
    private final Clock clock;
    private final DeployLogger logger = new SilentDeployLogger();
    private final ConfigserverConfig configserverConfig;
    private final FileDistributionStatus fileDistributionStatus;
    private final Orchestrator orchestrator;
    private final LogRetriever logRetriever = new LogRetriever();

    @Inject
    public ApplicationRepository(TenantRepository tenantRepository,
                                 HostProvisionerProvider hostProvisionerProvider,
                                 ConfigConvergenceChecker configConvergenceChecker,
                                 HttpProxy httpProxy,
                                 ConfigserverConfig configserverConfig,
                                 Orchestrator orchestrator) {
        this(tenantRepository, hostProvisionerProvider.getHostProvisioner(), configConvergenceChecker,
             httpProxy, configserverConfig, orchestrator, Clock.systemUTC(), new FileDistributionStatus());
    }

    public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner,
                                 Orchestrator orchestrator, Clock clock) {
        this(tenantRepository, hostProvisioner, orchestrator, clock, new ConfigserverConfig(new ConfigserverConfig.Builder()));
    }

    public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner,
                                 Orchestrator orchestrator, Clock clock, ConfigserverConfig configserverConfig) {
        this(tenantRepository, Optional.of(hostProvisioner), new ConfigConvergenceChecker(),
             new HttpProxy(new SimpleHttpFetcher()), configserverConfig, orchestrator, clock, new FileDistributionStatus());
    }

    private ApplicationRepository(TenantRepository tenantRepository,
                                  Optional<Provisioner> hostProvisioner,
                                  ConfigConvergenceChecker configConvergenceChecker,
                                  HttpProxy httpProxy,
                                  ConfigserverConfig configserverConfig,
                                  Orchestrator orchestrator,
                                  Clock clock,
                                  FileDistributionStatus fileDistributionStatus) {
        this.tenantRepository = tenantRepository;
        this.hostProvisioner = hostProvisioner;
        this.convergeChecker = configConvergenceChecker;
        this.httpProxy = httpProxy;
        this.clock = clock;
        this.configserverConfig = configserverConfig;
        this.orchestrator = orchestrator;
        this.fileDistributionStatus = fileDistributionStatus;
    }

    /** Prepares a session for activation; returns the session id, config change actions and deploy log. */
    public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) {
        validateThatLocalSessionIsNotActive(tenant, sessionId);
        LocalSession session = getLocalSession(tenant, sessionId);
        ApplicationId applicationId = prepareParams.getApplicationId();
        Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId);
        Slime deployLog = createDeployLog();
        DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId);
        ConfigChangeActions actions = session.prepare(logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now);
        logConfigChangeActions(actions, logger);
        log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. ");
        return new PrepareResult(sessionId, actions, deployLog);
    }

    /** Prepares, then activates a session; returns the prepare result. */
    public PrepareResult prepareAndActivate(Tenant tenant, long sessionId, PrepareParams prepareParams,
                                            boolean ignoreSessionStaleFailure, Instant now) {
        PrepareResult result = prepare(tenant, sessionId, prepareParams, now);
        activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreSessionStaleFailure);
        return result;
    }

    public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) {
        return deploy(in, prepareParams, false, clock.instant());
    }

    /** Deploys a compressed application package: decompresses to a temp dir which is always cleaned up. */
    public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams,
                                boolean ignoreSessionStaleFailure, Instant now) {
        File tempDir = Files.createTempDir();
        PrepareResult prepareResult;
        try {
            prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreSessionStaleFailure, now);
        } finally {
            cleanupTempDirectory(tempDir);
        }
        return prepareResult;
    }

    public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) {
        return deploy(applicationPackage, prepareParams, false, Instant.now());
    }

    /** Creates a session from the package on disk, then prepares and activates it. */
    public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams,
                                boolean ignoreSessionStaleFailure, Instant now) {
        ApplicationId applicationId = prepareParams.getApplicationId();
        long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage);
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        return prepareAndActivate(tenant, sessionId, prepareParams, ignoreSessionStaleFailure, now);
    }

    /**
     * Creates a new deployment from the active application, if available.
     * This is used for system internal redeployments, not on application package changes.
     *
     * @param application the active application to be redeployed
     * @return a new deployment from the local active, or empty if a local active application
     *         was not present for this id (meaning it either is not active or active on another
     *         node in the config server cluster)
     */
    @Override
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) {
        return deployFromLocalActive(application, false);
    }

    /**
     * Creates a new deployment from the active application, if available.
     * This is used for system internal redeployments, not on application package changes.
     *
     * @param application the active application to be redeployed
     * @param bootstrap the deployment is done when bootstrapping
     * @return a new deployment from the local active, or empty if a local active application
     *         was not present for this id (meaning it either is not active or active on another
     *         node in the config server cluster)
     */
    @Override
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
                                                                                 boolean bootstrap) {
        // Default timeout: ZooKeeper barrier timeout plus a small margin.
        return deployFromLocalActive(application,
                                     Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)),
                                     bootstrap);
    }

    /**
     * Creates a new deployment from the active application, if available.
     * This is used for system internal redeployments, not on application package changes.
     *
     * @param application the active application to be redeployed
     * @param timeout the timeout to use for each individual deployment operation
     * @param bootstrap the deployment is done when bootstrapping
     * @return a new deployment from the local active, or empty if a local active application
     *         was not present for this id (meaning it either is not active or active on another
     *         node in the config server cluster)
     */
    @Override
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
                                                                                 Duration timeout,
                                                                                 boolean bootstrap) {
        Tenant tenant = tenantRepository.getTenant(application.tenant());
        if (tenant == null) return Optional.empty();
        LocalSession activeSession = getActiveSession(tenant, application);
        if (activeSession == null) return Optional.empty();
        TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
        LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, true, timeoutBudget);
        tenant.getLocalSessionRepo().addSession(newSession);

        // Keep manually deployed applications on the current version if compatible (see decideVersion).
        Version version = decideVersion(application, zone().environment(), newSession.getVespaVersion(), bootstrap);
        return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock,
                                                 false /* don't validate as this is already deployed */, version,
                                                 bootstrap));
    }

    /** Returns the creation time of the active session for the given application, if any. */
    @Override
    public Optional<Instant> lastDeployTime(ApplicationId application) {
        Tenant tenant = tenantRepository.getTenant(application.tenant());
        if (tenant == null) return Optional.empty();
        LocalSession activeSession = getActiveSession(tenant, application);
        if (activeSession == null) return Optional.empty();
        return Optional.of(Instant.ofEpochSecond(activeSession.getCreateTime()));
    }

    /** Activates a prepared session; returns the application id of the activated session. */
    public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget,
                                  boolean ignoreSessionStaleFailure) {
        LocalSession localSession = getLocalSession(tenant, sessionId);
        Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft());
        deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure);
        deployment.activate();
        return localSession.getApplicationId();
    }

    private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) {
        return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock, false);
    }

    /**
     * Deletes an application
     *
     * @return true if the application was found and deleted, false if it was not present
     * @throws RuntimeException if the delete transaction fails. This method is exception safe.
     */
    public boolean delete(ApplicationId applicationId) {
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        if (tenant == null) return false;
        TenantApplications tenantApplications = tenant.getApplicationRepo();
        if (!tenantApplications.listApplications().contains(applicationId)) return false;

        long sessionId = tenantApplications.getSessionIdForApplication(applicationId);
        RemoteSession remoteSession = getRemoteSession(tenant, sessionId);
        remoteSession.createDeleteTransaction().commit();

        // Wait for the local session to disappear before removing the remaining state.
        log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Waiting for session " + sessionId + " to be deleted");
        Duration waitTime = Duration.ofSeconds(60);
        if (localSessionHasBeenDeleted(applicationId, sessionId, waitTime)) {
            log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " deleted");
        } else {
            log.log(LogLevel.ERROR, TenantRepository.logPre(applicationId) + "Session " + sessionId +
                                    " was not deleted (waited " + waitTime + ")");
            return false;
        }

        NestedTransaction transaction = new NestedTransaction();
        transaction.add(new Rotations(tenant.getCurator(), tenant.getPath()).delete(applicationId));
        transaction.add(tenantApplications.deleteApplication(applicationId));
        hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
        transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId));
        transaction.commit();
        return true;
    }

    /** Proxies a cluster controller status page request to the given host. */
    public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) {
        String relativePath = "clustercontroller-status/" + pathSuffix;
        return httpProxy.get(getApplication(applicationId), hostName, "container-clustercontroller", relativePath);
    }

    public Long getApplicationGeneration(ApplicationId applicationId) {
        return getApplication(applicationId).getApplicationGeneration();
    }

    public void restart(ApplicationId applicationId, HostFilter hostFilter) {
        hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter));
    }

    public boolean isSuspended(ApplicationId application) {
        return orchestrator.getAllSuspendedApplications().contains(application);
    }

    public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) {
        return fileDistributionStatus.status(getApplication(applicationId), timeout);
    }

    /**
     * Deletes file references on disk that are no longer referenced by any application
     * and have not been modified for at least 14 days. Returns the deleted references.
     */
    public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath) {
        if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory");

        Set<String> fileReferencesInUse = new HashSet<>();
        // NOTE(review): getOptionalApplication may return empty (it swallows lookup failures),
        // and map(Optional::get) on an empty Optional throws — confirm this is the intended
        // fail-fast behavior rather than a missing filter(Optional::isPresent).
        listApplications().stream()
                          .map(this::getOptionalApplication)
                          .map(Optional::get)
                          .forEach(application -> fileReferencesInUse.addAll(application.getModel().fileReferences().stream()
                                                                                        .map(FileReference::value)
                                                                                        .collect(Collectors.toSet())));
        log.log(LogLevel.DEBUG, "File references in use : " + fileReferencesInUse);

        Set<String> fileReferencesOnDisk = new HashSet<>();
        File[] filesOnDisk = fileReferencesPath.listFiles();
        if (filesOnDisk != null)
            fileReferencesOnDisk.addAll(Arrays.stream(filesOnDisk).map(File::getName).collect(Collectors.toSet()));
        log.log(LogLevel.DEBUG, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk);

        Instant instant = Instant.now().minus(Duration.ofDays(14)); // grace period before deletion
        Set<String> fileReferencesToDelete = fileReferencesOnDisk
                .stream()
                .filter(fileReference -> ! fileReferencesInUse.contains(fileReference))
                .filter(fileReference -> isFileLastModifiedBefore(new File(fileReferencesPath, fileReference), instant))
                .collect(Collectors.toSet());
        if (fileReferencesToDelete.size() > 0) {
            log.log(LogLevel.INFO, "Will delete file references not in use: " + fileReferencesToDelete);
            fileReferencesToDelete.forEach(fileReference -> {
                File file = new File(fileReferencesPath, fileReference);
                if ( ! IOUtils.recursiveDeleteDir(file))
                    log.log(LogLevel.WARNING, "Could not delete " + file.getAbsolutePath());
            });
        }
        return fileReferencesToDelete;
    }

    public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) {
        Tenant tenant = tenantRepository.getTenant(tenantName);
        return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode);
    }

    /** Loads the application model for the active session of the given application. Logs and rethrows failures. */
    private Application getApplication(ApplicationId applicationId) {
        try {
            Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
            if (tenant == null) throw new IllegalArgumentException("Tenant '" + applicationId.tenant() + "' not found");
            long sessionId = getSessionIdForApplication(tenant, applicationId);
            RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0);
            return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty(), clock.instant());
        } catch (Exception e) {
            log.log(LogLevel.WARNING, "Failed getting application for '" + applicationId + "'", e);
            throw e;
        }
    }

    /** Like getApplication, but returns empty instead of throwing. */
    private Optional<Application> getOptionalApplication(ApplicationId applicationId) {
        try {
            return Optional.of(getApplication(applicationId));
        } catch (Exception e) {
            return Optional.empty();
        }
    }

    /** Returns all application ids across all tenants. */
    Set<ApplicationId> listApplications() {
        return tenantRepository.getAllTenants().stream()
                               .flatMap(tenant -> tenant.getApplicationRepo().listApplications().stream())
                               .collect(Collectors.toSet());
    }

    private boolean isFileLastModifiedBefore(File fileReference, Instant instant) {
        BasicFileAttributes fileAttributes;
        try {
            fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class);
            return fileAttributes.lastModifiedTime().toInstant().isBefore(instant);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    /** Polls until the remote session is gone, or the wait time elapses. */
    private boolean localSessionHasBeenDeleted(ApplicationId applicationId, long sessionId, Duration waitTime) {
        RemoteSessionRepo remoteSessionRepo = tenantRepository.getTenant(applicationId.tenant()).getRemoteSessionRepo();
        Instant end = Instant.now().plus(waitTime);
        do {
            if (remoteSessionRepo.getSession(sessionId) == null) return true;
            try { Thread.sleep(10); } catch (InterruptedException e) { /* ignored */}
        } while (Instant.now().isBefore(end));
        return false;
    }

    public HttpResponse checkServiceForConfigConvergence(ApplicationId applicationId, String hostAndPort, URI uri,
                                                         Duration timeout) {
        return convergeChecker.checkService(getApplication(applicationId), hostAndPort, uri, timeout);
    }

    public HttpResponse servicesToCheckForConfigConvergence(ApplicationId applicationId, URI uri,
                                                            Duration timeoutPerService) {
        return convergeChecker.servicesToCheck(getApplication(applicationId), uri, timeoutPerService);
    }

    /** Fetches logs from the application's log server. getLogServerURI is defined elsewhere in this file. */
    public HttpResponse getLogs(ApplicationId applicationId, Optional<String> hostname, String apiParams) {
        String logServerURI = getLogServerURI(applicationId, hostname) + apiParams;
        return logRetriever.getLogs(logServerURI);
    }

    /**
     * Gets the active Session for the given application id.
     *
     * @return the active session, or null if there is no active session for the given application id.
     */
    public LocalSession getActiveSession(ApplicationId applicationId) {
        return getActiveSession(tenantRepository.getTenant(applicationId.tenant()), applicationId);
    }

    public long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        if (applicationRepo == null)
            throw new IllegalArgumentException("Application repo for tenant '" + tenant.getName() + "' not found");
        return applicationRepo.getSessionIdForApplication(applicationId);
    }

    public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) {
        RemoteSession session = getRemoteSession(tenant, sessionId);
        if (Session.Status.ACTIVATE.equals(session.getStatus())) {
            throw new IllegalStateException("Session is active: " + sessionId);
        }
    }

    public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) {
        RemoteSession session = getRemoteSession(tenant, sessionId);
        if ( ! Session.Status.PREPARE.equals(session.getStatus()))
            throw new IllegalStateException("Session not prepared: " + sessionId);
    }

    /** Creates a new session as a copy of the application's current session; returns the new session id. */
    public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger,
                                          boolean internalRedeploy, TimeoutBudget timeoutBudget) {
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
        SessionFactory sessionFactory = tenant.getSessionFactory();
        LocalSession fromSession = getExistingSession(tenant, applicationId);
        LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, internalRedeploy, timeoutBudget);
        localSessionRepo.addSession(session);
        return session.getSessionId();
    }

    /** Creates a session from a compressed application stream; the temp dir is always cleaned up. */
    public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) {
        File tempDir = Files.createTempDir();
        long sessionId;
        try {
            sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir));
        } finally {
            cleanupTempDirectory(tempDir);
        }
        return sessionId;
    }

    public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) {
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
        SessionFactory sessionFactory = tenant.getSessionFactory();
        LocalSession session = sessionFactory.createSession(applicationDirectory, applicationId, timeoutBudget);
        localSessionRepo.addSession(session);
        return session.getSessionId();
    }

    public void deleteExpiredLocalSessions() {
        tenantRepository.getAllTenants().forEach(tenant -> tenant.getLocalSessionRepo().purgeOldSessions());
    }

    /** Returns the total number of expired remote sessions deleted across all tenants. */
    public int deleteExpiredRemoteSessions(Duration expiryTime) {
        return tenantRepository.getAllTenants()
                .stream()
                .map(tenant -> tenant.getRemoteSessionRepo().deleteExpiredSessions(expiryTime))
                .mapToInt(i -> i)
                .sum();
    }

    /** Deletes tenants without applications that are older than the given TTL; the default and hosted-vespa tenants are kept. */
    public Set<TenantName> deleteUnusedTenants(Duration ttlForUnusedTenant, Instant now) {
        return tenantRepository.getAllTenantNames().stream()
                .filter(tenantName -> activeApplications(tenantName).isEmpty())
                .filter(tenantName -> !tenantName.equals(TenantName.defaultName()))
                .filter(tenantName -> !tenantName.equals(TenantRepository.HOSTED_VESPA_TENANT))
                .filter(tenantName -> tenantRepository.getTenant(tenantName).getCreatedTime().isBefore(now.minus(ttlForUnusedTenant)))
                .peek(tenantRepository::deleteTenant)
                .collect(Collectors.toSet());
    }

    public void deleteTenant(TenantName tenantName) {
        List<ApplicationId> activeApplications = activeApplications(tenantName);
        if (activeApplications.isEmpty())
            tenantRepository.deleteTenant(tenantName);
        else
            throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: "
                                               + activeApplications);
    }

    private List<ApplicationId> activeApplications(TenantName tenantName) {
        return tenantRepository.getTenant(tenantName).getApplicationRepo().listApplications();
    }

    /** Throws IllegalArgumentException unless both the tenant and the application exist; returns the tenant. */
    public Tenant verifyTenantAndApplication(ApplicationId applicationId) {
        TenantName tenantName = applicationId.tenant();
        if (!tenantRepository.checkThatTenantExists(tenantName)) {
            throw new IllegalArgumentException("Tenant " + tenantName + " was not found.");
        }
        Tenant tenant = tenantRepository.getTenant(tenantName);
        List<ApplicationId> applicationIds = listApplicationIds(tenant);
        if (!applicationIds.contains(applicationId)) {
            throw new IllegalArgumentException("No such application id: " + applicationId);
        }
        return tenant;
    }

    public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) {
        return getLocalSession(tenant, sessionId).getMetaData();
    }

    public ConfigserverConfig configserverConfig() {
        return configserverConfig;
    }

    private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) {
        LocalSession session = getLocalSession(tenant, sessionId);
        if (Session.Status.ACTIVATE.equals(session.getStatus())) {
            throw new IllegalStateException("Session is active: " + sessionId);
        }
    }

    private LocalSession getLocalSession(Tenant tenant, long sessionId) {
        LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId);
        if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
        return session;
    }

    private RemoteSession getRemoteSession(Tenant tenant, long sessionId) {
        RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId);
        if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
        return session;
    }

    /** Returns the currently active application set for the given id, or empty if it has no active session. */
    private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) {
        Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        try {
            long currentActiveSessionId = applicationRepo.getSessionIdForApplication(appId);
            RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId);
            if (currentActiveSession != null) {
                currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded());
            }
        } catch (IllegalArgumentException e) {
            // Intentionally ignored: no active session exists for this application, so empty is returned.
        }
        return currentActiveApplicationSet;
    }

    private File decompressApplication(InputStream in, String contentType, File tempDir) {
        try (CompressedApplicationInputStream application =
                     CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) {
            return decompressApplication(application, tempDir);
        } catch (IOException e) {
            throw new IllegalArgumentException("Unable to decompress data in body", e);
        }
    }

    private File decompressApplication(CompressedApplicationInputStream in, File tempDir) {
        try {
            return in.decompress(tempDir);
        } catch (IOException e) {
            throw new IllegalArgumentException("Unable to decompress stream", e);
        }
    }

    private List<ApplicationId> listApplicationIds(Tenant tenant) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        return applicationRepo.listApplications();
    }

    private void cleanupTempDirectory(File tempDir) {
        logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'");
        if (!IOUtils.recursiveDeleteDir(tempDir)) {
            logger.log(LogLevel.WARNING, "Not able to delete tmp dir '" + tempDir + "'");
        }
    }

    private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        return getLocalSession(tenant, applicationRepo.getSessionIdForApplication(applicationId));
    }

    private LocalSession getActiveSession(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        if (applicationRepo.listApplications().contains(applicationId)) {
            return tenant.getLocalSessionRepo().getSession(applicationRepo.getSessionIdForApplication(applicationId));
        }
        return null;
    }

    /** Logs restart and re-feed actions resulting from config changes, at appropriate levels. */
    private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) {
        RestartActions restartActions = actions.getRestartActions();
        if ( ! restartActions.isEmpty()) {
            logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" +
                                      restartActions.format());
        }
        RefeedActions refeedActions = actions.getRefeedActions();
        if ( ! refeedActions.isEmpty()) {
            boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed);
            logger.log(allAllowed ? Level.INFO : Level.WARNING,
                       "Change(s) between active and new application that may require re-feed:\n" +
                       refeedActions.format());
        }
    }

    /** Returns version to use when deploying application in given environment */
    static Version decideVersion(ApplicationId application, Environment environment, Version sessionVersion, boolean bootstrap) {
        // Manually deployed applications are pinned to the config server's version when on the same major,
        // except for hosted-vespa apps, testers and bootstrap deployments.
        if ( environment.isManuallyDeployed()
             && sessionVersion.getMajor() == Vtag.currentVersion.getMajor()
             && ! "hosted-vespa".equals(application.tenant().value())
             && ! application.instance().isTester()
             && ! bootstrap) {
            return Vtag.currentVersion;
        }
        return sessionVersion;
    }

    public Slime createDeployLog() {
        Slime deployLog = new Slime();
        deployLog.setObject();
        return deployLog;
    }

    public Zone zone() {
        return new Zone(SystemName.from(configserverConfig.system()),
                        Environment.from(configserverConfig.environment()),
                        RegionName.from(configserverConfig.region()));
    }

}
class ApplicationRepository implements com.yahoo.config.provision.Deployer { private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName()); private final TenantRepository tenantRepository; private final Optional<Provisioner> hostProvisioner; private final ConfigConvergenceChecker convergeChecker; private final HttpProxy httpProxy; private final Clock clock; private final DeployLogger logger = new SilentDeployLogger(); private final ConfigserverConfig configserverConfig; private final FileDistributionStatus fileDistributionStatus; private final Orchestrator orchestrator; private final LogRetriever logRetriever = new LogRetriever(); @Inject public ApplicationRepository(TenantRepository tenantRepository, HostProvisionerProvider hostProvisionerProvider, ConfigConvergenceChecker configConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Orchestrator orchestrator) { this(tenantRepository, hostProvisionerProvider.getHostProvisioner(), configConvergenceChecker, httpProxy, configserverConfig, orchestrator, Clock.systemUTC(), new FileDistributionStatus()); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Orchestrator orchestrator, Clock clock) { this(tenantRepository, hostProvisioner, orchestrator, clock, new ConfigserverConfig(new ConfigserverConfig.Builder())); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Orchestrator orchestrator, Clock clock, ConfigserverConfig configserverConfig) { this(tenantRepository, Optional.of(hostProvisioner), new ConfigConvergenceChecker(), new HttpProxy(new SimpleHttpFetcher()), configserverConfig, orchestrator, clock, new FileDistributionStatus()); } private ApplicationRepository(TenantRepository tenantRepository, Optional<Provisioner> hostProvisioner, ConfigConvergenceChecker configConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Orchestrator orchestrator, Clock 
clock, FileDistributionStatus fileDistributionStatus) { this.tenantRepository = tenantRepository; this.hostProvisioner = hostProvisioner; this.convergeChecker = configConvergenceChecker; this.httpProxy = httpProxy; this.clock = clock; this.configserverConfig = configserverConfig; this.orchestrator = orchestrator; this.fileDistributionStatus = fileDistributionStatus; } public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) { validateThatLocalSessionIsNotActive(tenant, sessionId); LocalSession session = getLocalSession(tenant, sessionId); ApplicationId applicationId = prepareParams.getApplicationId(); Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId); Slime deployLog = createDeployLog(); DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId); ConfigChangeActions actions = session.prepare(logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now); logConfigChangeActions(actions, logger); log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. 
"); return new PrepareResult(sessionId, actions, deployLog); } public PrepareResult prepareAndActivate(Tenant tenant, long sessionId, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { PrepareResult result = prepare(tenant, sessionId, prepareParams, now); activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreSessionStaleFailure); return result; } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) { return deploy(in, prepareParams, false, clock.instant()); } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { File tempDir = Files.createTempDir(); PrepareResult prepareResult; try { prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreSessionStaleFailure, now); } finally { cleanupTempDirectory(tempDir); } return prepareResult; } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) { return deploy(applicationPackage, prepareParams, false, Instant.now()); } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); return prepareAndActivate(tenant, sessionId, prepareParams, ignoreSessionStaleFailure, now); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. 
* * @param application the active application to be redeployed * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) { return deployFromLocalActive(application, false); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. * * @param application the active application to be redeployed * @param bootstrap the deployment is done when bootstrapping * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, boolean bootstrap) { return deployFromLocalActive(application, Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)), bootstrap); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. 
* * @param application the active application to be redeployed * @param timeout the timeout to use for each individual deployment operation * @param bootstrap the deployment is done when bootstrapping * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout, boolean bootstrap) { Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); LocalSession activeSession = getActiveSession(tenant, application); if (activeSession == null) return Optional.empty(); TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, true, timeoutBudget); tenant.getLocalSessionRepo().addSession(newSession); Version version = decideVersion(application, zone().environment(), newSession.getVespaVersion(), bootstrap); return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock, false /* don't validate as this is already deployed */, version, bootstrap)); } @Override public Optional<Instant> lastDeployTime(ApplicationId application) { Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); LocalSession activeSession = getActiveSession(tenant, application); if (activeSession == null) return Optional.empty(); return Optional.of(Instant.ofEpochSecond(activeSession.getCreateTime())); } public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget, boolean ignoreSessionStaleFailure) { LocalSession localSession = getLocalSession(tenant, sessionId); Deployment deployment = deployFromPreparedSession(localSession, tenant, 
timeoutBudget.timeLeft()); deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure); deployment.activate(); return localSession.getApplicationId(); } private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) { return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock, false); } /** * Deletes an application * * @return true if the application was found and deleted, false if it was not present * @throws RuntimeException if the delete transaction fails. This method is exception safe. */ public boolean delete(ApplicationId applicationId) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); if (tenant == null) return false; TenantApplications tenantApplications = tenant.getApplicationRepo(); if (!tenantApplications.listApplications().contains(applicationId)) return false; long sessionId = tenantApplications.getSessionIdForApplication(applicationId); RemoteSession remoteSession = getRemoteSession(tenant, sessionId); remoteSession.createDeleteTransaction().commit(); log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Waiting for session " + sessionId + " to be deleted"); Duration waitTime = Duration.ofSeconds(60); if (localSessionHasBeenDeleted(applicationId, sessionId, waitTime)) { log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " deleted"); } else { log.log(LogLevel.ERROR, TenantRepository.logPre(applicationId) + "Session " + sessionId + " was not deleted (waited " + waitTime + ")"); return false; } NestedTransaction transaction = new NestedTransaction(); transaction.add(new Rotations(tenant.getCurator(), tenant.getPath()).delete(applicationId)); transaction.add(tenantApplications.deleteApplication(applicationId)); hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId)); transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId)); transaction.commit(); return true; } 
public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) { String relativePath = "clustercontroller-status/" + pathSuffix; return httpProxy.get(getApplication(applicationId), hostName, "container-clustercontroller", relativePath); } public Long getApplicationGeneration(ApplicationId applicationId) { return getApplication(applicationId).getApplicationGeneration(); } public void restart(ApplicationId applicationId, HostFilter hostFilter) { hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter)); } public boolean isSuspended(ApplicationId application) { return orchestrator.getAllSuspendedApplications().contains(application); } public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) { return fileDistributionStatus.status(getApplication(applicationId), timeout); } public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath) { if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory"); Set<String> fileReferencesInUse = new HashSet<>(); listApplications().stream() .map(this::getOptionalApplication) .map(Optional::get) .forEach(application -> fileReferencesInUse.addAll(application.getModel().fileReferences().stream() .map(FileReference::value) .collect(Collectors.toSet()))); log.log(LogLevel.DEBUG, "File references in use : " + fileReferencesInUse); Set<String> fileReferencesOnDisk = new HashSet<>(); File[] filesOnDisk = fileReferencesPath.listFiles(); if (filesOnDisk != null) fileReferencesOnDisk.addAll(Arrays.stream(filesOnDisk).map(File::getName).collect(Collectors.toSet())); log.log(LogLevel.DEBUG, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk); Instant instant = Instant.now().minus(Duration.ofDays(14)); Set<String> fileReferencesToDelete = fileReferencesOnDisk .stream() .filter(fileReference -> ! 
fileReferencesInUse.contains(fileReference)) .filter(fileReference -> isFileLastModifiedBefore(new File(fileReferencesPath, fileReference), instant)) .collect(Collectors.toSet()); if (fileReferencesToDelete.size() > 0) { log.log(LogLevel.INFO, "Will delete file references not in use: " + fileReferencesToDelete); fileReferencesToDelete.forEach(fileReference -> { File file = new File(fileReferencesPath, fileReference); if ( ! IOUtils.recursiveDeleteDir(file)) log.log(LogLevel.WARNING, "Could not delete " + file.getAbsolutePath()); }); } return fileReferencesToDelete; } public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) { Tenant tenant = tenantRepository.getTenant(tenantName); return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode); } private Application getApplication(ApplicationId applicationId) { try { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); if (tenant == null) throw new IllegalArgumentException("Tenant '" + applicationId.tenant() + "' not found"); long sessionId = getSessionIdForApplication(tenant, applicationId); RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0); return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty(), clock.instant()); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed getting application for '" + applicationId + "'", e); throw e; } } private Optional<Application> getOptionalApplication(ApplicationId applicationId) { try { return Optional.of(getApplication(applicationId)); } catch (Exception e) { return Optional.empty(); } } Set<ApplicationId> listApplications() { return tenantRepository.getAllTenants().stream() .flatMap(tenant -> tenant.getApplicationRepo().listApplications().stream()) .collect(Collectors.toSet()); } private boolean isFileLastModifiedBefore(File fileReference, Instant instant) { BasicFileAttributes fileAttributes; try { 
fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class); return fileAttributes.lastModifiedTime().toInstant().isBefore(instant); } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean localSessionHasBeenDeleted(ApplicationId applicationId, long sessionId, Duration waitTime) { RemoteSessionRepo remoteSessionRepo = tenantRepository.getTenant(applicationId.tenant()).getRemoteSessionRepo(); Instant end = Instant.now().plus(waitTime); do { if (remoteSessionRepo.getSession(sessionId) == null) return true; try { Thread.sleep(10); } catch (InterruptedException e) { /* ignored */} } while (Instant.now().isBefore(end)); return false; } public HttpResponse checkServiceForConfigConvergence(ApplicationId applicationId, String hostAndPort, URI uri, Duration timeout) { return convergeChecker.checkService(getApplication(applicationId), hostAndPort, uri, timeout); } public HttpResponse servicesToCheckForConfigConvergence(ApplicationId applicationId, URI uri, Duration timeoutPerService) { return convergeChecker.servicesToCheck(getApplication(applicationId), uri, timeoutPerService); } public HttpResponse getLogs(ApplicationId applicationId, Optional<String> hostname, String apiParams) { String logServerURI = getLogServerURI(applicationId, hostname) + apiParams; return logRetriever.getLogs(logServerURI); } /** * Gets the active Session for the given application id. * * @return the active session, or null if there is no active session for the given application id. 
*/ public LocalSession getActiveSession(ApplicationId applicationId) { return getActiveSession(tenantRepository.getTenant(applicationId.tenant()), applicationId); } public long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (applicationRepo == null) throw new IllegalArgumentException("Application repo for tenant '" + tenant.getName() + "' not found"); return applicationRepo.getSessionIdForApplication(applicationId); } public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) { RemoteSession session = getRemoteSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) { RemoteSession session = getRemoteSession(tenant, sessionId); if ( ! Session.Status.PREPARE.equals(session.getStatus())) throw new IllegalStateException("Session not prepared: " + sessionId); } public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo(); SessionFactory sessionFactory = tenant.getSessionFactory(); LocalSession fromSession = getExistingSession(tenant, applicationId); LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, internalRedeploy, timeoutBudget); localSessionRepo.addSession(session); return session.getSessionId(); } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) { File tempDir = Files.createTempDir(); long sessionId; try { sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir)); } finally { cleanupTempDirectory(tempDir); } 
return sessionId; } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo(); SessionFactory sessionFactory = tenant.getSessionFactory(); LocalSession session = sessionFactory.createSession(applicationDirectory, applicationId, timeoutBudget); localSessionRepo.addSession(session); return session.getSessionId(); } public void deleteExpiredLocalSessions() { tenantRepository.getAllTenants().forEach(tenant -> tenant.getLocalSessionRepo().purgeOldSessions()); } public int deleteExpiredRemoteSessions(Duration expiryTime) { return tenantRepository.getAllTenants() .stream() .map(tenant -> tenant.getRemoteSessionRepo().deleteExpiredSessions(expiryTime)) .mapToInt(i -> i) .sum(); } public Set<TenantName> deleteUnusedTenants(Duration ttlForUnusedTenant, Instant now) { return tenantRepository.getAllTenantNames().stream() .filter(tenantName -> activeApplications(tenantName).isEmpty()) .filter(tenantName -> !tenantName.equals(TenantName.defaultName())) .filter(tenantName -> !tenantName.equals(TenantRepository.HOSTED_VESPA_TENANT)) .filter(tenantName -> tenantRepository.getTenant(tenantName).getCreatedTime().isBefore(now.minus(ttlForUnusedTenant))) .peek(tenantRepository::deleteTenant) .collect(Collectors.toSet()); } public void deleteTenant(TenantName tenantName) { List<ApplicationId> activeApplications = activeApplications(tenantName); if (activeApplications.isEmpty()) tenantRepository.deleteTenant(tenantName); else throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications); } private List<ApplicationId> activeApplications(TenantName tenantName) { return tenantRepository.getTenant(tenantName).getApplicationRepo().listApplications(); } public Tenant verifyTenantAndApplication(ApplicationId applicationId) { TenantName 
tenantName = applicationId.tenant(); if (!tenantRepository.checkThatTenantExists(tenantName)) { throw new IllegalArgumentException("Tenant " + tenantName + " was not found."); } Tenant tenant = tenantRepository.getTenant(tenantName); List<ApplicationId> applicationIds = listApplicationIds(tenant); if (!applicationIds.contains(applicationId)) { throw new IllegalArgumentException("No such application id: " + applicationId); } return tenant; } public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) { return getLocalSession(tenant, sessionId).getMetaData(); } public ConfigserverConfig configserverConfig() { return configserverConfig; } private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) { LocalSession session = getLocalSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } private LocalSession getLocalSession(Tenant tenant, long sessionId) { LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private RemoteSession getRemoteSession(Tenant tenant, long sessionId) { RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) { Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty(); TenantApplications applicationRepo = tenant.getApplicationRepo(); try { long currentActiveSessionId = applicationRepo.getSessionIdForApplication(appId); RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId); if (currentActiveSession != null) { currentActiveApplicationSet = 
Optional.ofNullable(currentActiveSession.ensureApplicationLoaded()); } } catch (IllegalArgumentException e) { } return currentActiveApplicationSet; } private File decompressApplication(InputStream in, String contentType, File tempDir) { try (CompressedApplicationInputStream application = CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) { return decompressApplication(application, tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress data in body", e); } } private File decompressApplication(CompressedApplicationInputStream in, File tempDir) { try { return in.decompress(tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress stream", e); } } private List<ApplicationId> listApplicationIds(Tenant tenant) { TenantApplications applicationRepo = tenant.getApplicationRepo(); return applicationRepo.listApplications(); } private void cleanupTempDirectory(File tempDir) { logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'"); if (!IOUtils.recursiveDeleteDir(tempDir)) { logger.log(LogLevel.WARNING, "Not able to delete tmp dir '" + tempDir + "'"); } } private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); return getLocalSession(tenant, applicationRepo.getSessionIdForApplication(applicationId)); } private LocalSession getActiveSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (applicationRepo.listApplications().contains(applicationId)) { return tenant.getLocalSessionRepo().getSession(applicationRepo.getSessionIdForApplication(applicationId)); } return null; } private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) { RestartActions restartActions = actions.getRestartActions(); if ( ! 
restartActions.isEmpty()) { logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" + restartActions.format()); } RefeedActions refeedActions = actions.getRefeedActions(); if ( ! refeedActions.isEmpty()) { boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed); logger.log(allAllowed ? Level.INFO : Level.WARNING, "Change(s) between active and new application that may require re-feed:\n" + refeedActions.format()); } } /** Returns version to use when deploying application in given environment */ static Version decideVersion(ApplicationId application, Environment environment, Version sessionVersion, boolean bootstrap) { if ( environment.isManuallyDeployed() && sessionVersion.getMajor() == Vtag.currentVersion.getMajor() && ! "hosted-vespa".equals(application.tenant().value()) && ! application.instance().isTester() && ! bootstrap) { return Vtag.currentVersion; } return sessionVersion; } public Slime createDeployLog() { Slime deployLog = new Slime(); deployLog.setObject(); return deployLog; } public Zone zone() { return new Zone(SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region())); } }
👍 . Weird that I did not notice this bug while testing... 😅 In any case, it is good that it gets fixed now.
public void createRequestMapping(Inspector inspector, Map<String, String> map, String parent) { inspector.traverse((ObjectTraverser) (key, value) -> { String qualifiedKey = parent + key; switch (value.type()) { case BOOL: map.put(qualifiedKey, Boolean.toString(value.asBool())); break; case DOUBLE: map.put(qualifiedKey, Double.toString(value.asDouble())); break; case LONG: map.put(qualifiedKey, Long.toString(value.asLong())); break; case STRING: map.put(qualifiedKey , value.asString()); break; case ARRAY: map.put(qualifiedKey, value.toString()); break; case OBJECT: if (qualifiedKey.equals("select.where") || qualifiedKey.equals("select.grouping")) { map.put(qualifiedKey, value.toString()); break; } createRequestMapping(value, map, qualifiedKey + "."); break; } }); }
map.put(qualifiedKey, value.toString());
public void createRequestMapping(Inspector inspector, Map<String, String> map, String parent) { inspector.traverse((ObjectTraverser) (key, value) -> { String qualifiedKey = parent + key; switch (value.type()) { case BOOL: map.put(qualifiedKey, Boolean.toString(value.asBool())); break; case DOUBLE: map.put(qualifiedKey, Double.toString(value.asDouble())); break; case LONG: map.put(qualifiedKey, Long.toString(value.asLong())); break; case STRING: map.put(qualifiedKey , value.asString()); break; case ARRAY: map.put(qualifiedKey, value.toString()); break; case OBJECT: if (qualifiedKey.equals("select.where") || qualifiedKey.equals("select.grouping")) { map.put(qualifiedKey, value.toString()); break; } createRequestMapping(value, map, qualifiedKey + "."); break; } }); }
class MeanConnections implements Callback { @Override public void run(Handle h, boolean firstTime) { if (firstTime) { metric.set(SEARCH_CONNECTIONS, 0.0d, null); return; } Value v = (Value) h; metric.set(SEARCH_CONNECTIONS, v.getMean(), null); } }
class MeanConnections implements Callback { @Override public void run(Handle h, boolean firstTime) { if (firstTime) { metric.set(SEARCH_CONNECTIONS, 0.0d, null); return; } Value v = (Value) h; metric.set(SEARCH_CONNECTIONS, v.getMean(), null); } }
```suggestion return "Unexpected error parsing or serializing query: " + Exceptions.toMessageString(e); ``` Typo fix.
public String yqlRepresentation() { try { return yqlRepresentation(true); } catch (NullItemException e) { return "Query currently a placeholder, NullItem encountered."; } catch (IllegalArgumentException e) { return "Invalid query: " + Exceptions.toMessageString(e); } catch (RuntimeException e) { return "Unepected error parsing or serializing query: " + Exceptions.toMessageString(e); } }
return "Unepected error parsing or serializing query: " + Exceptions.toMessageString(e);
public String yqlRepresentation() { try { return yqlRepresentation(true); } catch (NullItemException e) { return "Query currently a placeholder, NullItem encountered."; } catch (IllegalArgumentException e) { return "Invalid query: " + Exceptions.toMessageString(e); } catch (RuntimeException e) { return "Unexpected error parsing or serializing query: " + Exceptions.toMessageString(e); } }
class Query extends com.yahoo.processing.Request implements Cloneable { /** The type of the query */ public enum Type { ALL(0,"all"), ANY(1,"any"), PHRASE(2,"phrase"), ADVANCED(3,"adv"), WEB(4,"web"), PROGRAMMATIC(5, "prog"), YQL(6, "yql"), SELECT(7, "select");; private final int intValue; private final String stringValue; Type(int intValue, String stringValue) { this.intValue = intValue; this.stringValue = stringValue; } /** Converts a type argument value into a query type */ public static Type getType(String typeString) { for (Type type : Type.values()) if (type.stringValue.equals(typeString)) return type; return ALL; } public int asInt() { return intValue; } public String toString() { return stringValue; } } /** The offset from the most relevant hits found from this query */ private int offset = 0; /** The number of hits to return */ private int hits = 10; /** The query context level, 0 means no tracing */ private int traceLevel = 0; private static final long dumpTimeout = (6 * 60 * 1000); private static final long defaultTimeout = 500; /** The timeout of the query, in milliseconds */ private long timeout = defaultTimeout; /** Whether this query is forbidden to access cached information */ private boolean noCache = false; /** Whether or not grouping should use a session cache */ private boolean groupingSessionCache = false; /** The synchronous view of the JDisc request causing this query */ private final HttpRequest httpRequest; /** The context, or null if there is no context */ private QueryContext context = null; /** Used for downstream session caches */ private UniqueRequestId requestId = null; /** The ranking requested in this query */ private Ranking ranking = new Ranking(this); /** The query query and/or query program declaration */ private Model model = new Model(this); /** How results of this query should be presented */ private Presentation presentation = new Presentation(this); /** The selection of where-clause and grouping */ private Select select = 
new Select(this); private static Logger log = Logger.getLogger(Query.class.getName()); /** The time this query was created */ private long startTime; public static final CompoundName OFFSET = new CompoundName("offset"); public static final CompoundName HITS = new CompoundName("hits"); public static final CompoundName QUERY_PROFILE = new CompoundName("queryProfile"); public static final CompoundName SEARCH_CHAIN = new CompoundName("searchChain"); public static final CompoundName TRACE_LEVEL = new CompoundName("traceLevel"); public static final CompoundName NO_CACHE = new CompoundName("noCache"); public static final CompoundName GROUPING_SESSION_CACHE = new CompoundName("groupingSessionCache"); public static final CompoundName TIMEOUT = new CompoundName("timeout"); private static QueryProfileType argumentType; static { argumentType = new QueryProfileType("native"); argumentType.setBuiltin(true); argumentType.addField(new FieldDescription(OFFSET.toString(), "integer", "offset start")); argumentType.addField(new FieldDescription(HITS.toString(), "integer", "hits count")); argumentType.addField(new FieldDescription(QUERY_PROFILE.toString(), "string")); argumentType.addField(new FieldDescription(SEARCH_CHAIN.toString(), "string")); argumentType.addField(new FieldDescription(TRACE_LEVEL.toString(), "integer", "tracelevel")); argumentType.addField(new FieldDescription(NO_CACHE.toString(), "boolean", "nocache")); argumentType.addField(new FieldDescription(GROUPING_SESSION_CACHE.toString(), "boolean", "groupingSessionCache")); argumentType.addField(new FieldDescription(TIMEOUT.toString(), "string", "timeout")); argumentType.addField(new FieldDescription(FederationSearcher.SOURCENAME.toString(),"string")); argumentType.addField(new FieldDescription(FederationSearcher.PROVIDERNAME.toString(),"string")); argumentType.addField(new FieldDescription(Presentation.PRESENTATION, new QueryProfileFieldType(Presentation.getArgumentType()))); argumentType.addField(new 
FieldDescription(Ranking.RANKING, new QueryProfileFieldType(Ranking.getArgumentType()))); argumentType.addField(new FieldDescription(Model.MODEL, new QueryProfileFieldType(Model.getArgumentType()))); argumentType.addField(new FieldDescription(Select.SELECT, new QueryProfileFieldType(Select.getArgumentType()))); argumentType.freeze(); } public static QueryProfileType getArgumentType() { return argumentType; } /** The aliases of query properties */ private static Map<String,CompoundName> propertyAliases; static { Map<String,CompoundName> propertyAliasesBuilder = new HashMap<>(); addAliases(Query.getArgumentType(), propertyAliasesBuilder); addAliases(Ranking.getArgumentType(), propertyAliasesBuilder); addAliases(Model.getArgumentType(), propertyAliasesBuilder); addAliases(Presentation.getArgumentType(), propertyAliasesBuilder); addAliases(Select.getArgumentType(), propertyAliasesBuilder); propertyAliases = ImmutableMap.copyOf(propertyAliasesBuilder); } private static void addAliases(QueryProfileType arguments, Map<String, CompoundName> aliases) { String prefix = getPrefix(arguments); for (FieldDescription field : arguments.fields().values()) { for (String alias : field.getAliases()) aliases.put(alias, new CompoundName(prefix+field.getName())); } } private static String getPrefix(QueryProfileType type) { if (type.getId().getName().equals("native")) return ""; return type.getId().getName() + "."; } public static void addNativeQueryProfileTypesTo(QueryProfileTypeRegistry registry) { registry.register(Query.getArgumentType().unfrozen()); registry.register(Ranking.getArgumentType().unfrozen()); registry.register(Model.getArgumentType().unfrozen()); registry.register(Select.getArgumentType().unfrozen()); registry.register(Presentation.getArgumentType().unfrozen()); registry.register(DefaultProperties.argumentType.unfrozen()); } /** Returns an unmodifiable list of all the native properties under a Query */ public static final List<CompoundName> nativeProperties = 
ImmutableList.copyOf(namesUnder(CompoundName.empty, Query.getArgumentType())); private static List<CompoundName> namesUnder(CompoundName prefix, QueryProfileType type) { if ( type == null) return Collections.emptyList(); List<CompoundName> names = new ArrayList<>(); for (Map.Entry<String, FieldDescription> field : type.fields().entrySet()) { if (field.getValue().getType() instanceof QueryProfileFieldType) { names.addAll(namesUnder(prefix.append(field.getKey()), ((QueryProfileFieldType) field.getValue().getType()).getQueryProfileType())); } else { names.add(prefix.append(field.getKey())); } } return names; } /** * Constructs an empty (null) query */ public Query() { this(""); } /** * Construct a query from a string formatted in the http style, e.g <code>?query=test&amp;offset=10&amp;hits=13</code> * The query must be uri encoded. */ public Query(String query) { this(query, null); } /** * Creates a query from a request * * @param request the HTTP request from which this is created */ public Query(HttpRequest request) { this(request, null); } /** * Construct a query from a string formatted in the http style, e.g <code>?query=test&amp;offset=10&amp;hits=13</code> * The query must be uri encoded. */ public Query(String query, CompiledQueryProfile queryProfile) { this(HttpRequest.createTestRequest(query, com.yahoo.jdisc.http.HttpRequest.Method.GET), queryProfile); } /** * Creates a query from a request * * @param request the HTTP request from which this is created * @param queryProfile the query profile to use for this query, or null if none. */ public Query(HttpRequest request, CompiledQueryProfile queryProfile) { this(request, request.propertyMap(), queryProfile); } /** * Creates a query from a request * * @param request the HTTP request from which this is created. * @param requestMap the property map of the query. * @param queryProfile the query profile to use for this query, or null if none. 
*/ public Query(HttpRequest request, Map<String, String> requestMap, CompiledQueryProfile queryProfile) { super(new QueryPropertyAliases(propertyAliases)); this.httpRequest = request; init(requestMap, queryProfile); } private void init(Map<String, String> requestMap, CompiledQueryProfile queryProfile) { startTime = System.currentTimeMillis(); if (queryProfile != null) { Properties queryProfileProperties = new QueryProfileProperties(queryProfile); properties().chain(queryProfileProperties); setPropertiesFromRequestMap(requestMap, properties()); properties().chain(new QueryProperties(this, queryProfile.getRegistry())). chain(new ModelObjectMap()). chain(new RequestContextProperties(requestMap)). chain(queryProfileProperties). chain(new DefaultProperties()); setFieldsFrom(queryProfileProperties, requestMap); } else { properties(). chain(new QueryProperties(this, CompiledQueryProfileRegistry.empty)). chain(new PropertyMap()). chain(new DefaultProperties()); setPropertiesFromRequestMap(requestMap, properties()); } properties().setParentQuery(this); traceProperties(); } public Query(Query query) { this(query, query.getStartTime()); } private Query(Query query, long startTime) { super(query.properties().clone()); this.startTime = startTime; this.httpRequest = query.httpRequest; query.copyPropertiesTo(this); } /** * Creates a new query from another query, but with time sensitive fields reset. */ public static Query createNewQuery(Query query) { return new Query(query, System.currentTimeMillis()); } /** * Calls properties().set on each value in the given properties which is declared in this query or * one of its dependent objects. 
This will ensure the appropriate setters are called on this and all * dependent objects for the appropriate subset of the given property values */ private void setFieldsFrom(Properties properties, Map<String, String> context) { setFrom("", properties, Query.getArgumentType(), context); } /** * For each field in the given query profile type, take the corresponding value from originalProperties * (if any) set it to properties(), recursively. */ private void setFrom(String prefix, Properties originalProperties, QueryProfileType arguments, Map<String, String> context) { prefix = prefix + getPrefix(arguments); for (FieldDescription field : arguments.fields().values()) { String fullName = prefix + field.getName(); if (field.getType() == FieldType.genericQueryProfileType) { for (Map.Entry<String, Object> entry : originalProperties.listProperties(fullName, context).entrySet()) { try { properties().set(fullName + "." + entry.getKey(), entry.getValue(), context); } catch (IllegalArgumentException e) { throw new QueryException("Invalid request parameter", e); } } } else if (field.getType() instanceof QueryProfileFieldType) { setFrom(prefix, originalProperties, ((QueryProfileFieldType)field.getType()).getQueryProfileType(), context); } else { Object value = originalProperties.get(fullName, context); if (value != null) { try { properties().set(fullName, value, context); } catch (IllegalArgumentException e) { throw new QueryException("Invalid request parameter", e); } } } } } /** Calls properties.set on all entries in requestMap */ private void setPropertiesFromRequestMap(Map<String, String> requestMap, Properties properties) { for (var entry : requestMap.entrySet()) { try { properties.set(entry.getKey(), entry.getValue(), requestMap); } catch (IllegalArgumentException e) { throw new QueryException("Invalid request parameter", e); } } } /** Returns the properties of this query. 
The properties are modifiable */ @Override public Properties properties() { return (Properties)super.properties(); } /** * Traces how properties was resolved and from where. Done after the fact to avoid special handling * of tracelevel, which is the property deciding whether this needs to be done */ private void traceProperties() { if (traceLevel == 0) return; CompiledQueryProfile profile = null; QueryProfileProperties profileProperties = properties().getInstance(QueryProfileProperties.class); if (profileProperties != null) profile = profileProperties.getQueryProfile(); if (profile == null) trace("No query profile is used", false, 1); else trace("Using " + profile.toString(), false, 1); if (traceLevel < 4) return; StringBuilder b = new StringBuilder("Resolved properties:\n"); Set<String> mentioned = new HashSet<>(); for (Map.Entry<String,String> requestProperty : requestProperties().entrySet() ) { Object resolvedValue = properties().get(requestProperty.getKey(), requestProperties()); if (resolvedValue == null && requestProperty.getKey().equals("queryProfile")) resolvedValue = requestProperty.getValue(); b.append(requestProperty.getKey()); b.append("="); b.append(resolvedValue); b.append(" ("); if (profile != null && ! profile.isOverridable(new CompoundName(requestProperty.getKey()), requestProperties())) b.append("value from query profile - unoverridable, ignoring request value"); else b.append("value from request"); b.append(")\n"); mentioned.add(requestProperty.getKey()); } if (profile != null) { appendQueryProfileProperties(profile, mentioned, b); } trace(b.toString(),false,4); } private Map<String, String> requestProperties() { return httpRequest.propertyMap(); } private void appendQueryProfileProperties(CompiledQueryProfile profile,Set<String> mentioned,StringBuilder b) { for (Map.Entry<String,Object> property : profile.listValues("", requestProperties()).entrySet()) { if ( ! 
mentioned.contains(property.getKey())) b.append(property.getKey() + "=" + property.getValue() + " (value from query profile)<br/>\n"); } } /** * Validates this query * * @return the reason if it is invalid, null if it is valid */ public String validate() { QueryProfileProperties queryProfileProperties = properties().getInstance(QueryProfileProperties.class); if (queryProfileProperties == null) return null; StringBuilder missingName = new StringBuilder(); if ( ! queryProfileProperties.isComplete(missingName, httpRequest.propertyMap())) return "Incomplete query: Parameter '" + missingName + "' is mandatory in " + queryProfileProperties.getQueryProfile() + " but is not set"; else return null; } /** Returns the time (in milliseconds since epoch) when this query was started */ public long getStartTime() { return startTime; } /** Returns the time (in milliseconds) since the query was started/created */ public long getDurationTime() { return System.currentTimeMillis() - startTime; } /** * Get the appropriate timeout for the query. * * @return timeout in milliseconds */ public long getTimeLeft() { return getTimeout() - getDurationTime(); } /** * Returns the number of milliseconds to wait for a response from a search backend * before timing it out. Default is 500. * <p> * Note: If Ranking.RANKFEATURES is turned on, this is hardcoded to 6 minutes. * * @return timeout in milliseconds. */ public long getTimeout() { return properties().getBoolean(Ranking.RANKFEATURES, false) ? dumpTimeout : timeout; } /** * Sets the number of milliseconds to wait for a response from a search backend * before time out. Default is 500. 
*/ public void setTimeout(long timeout) { if (timeout > 1000000000 || timeout < 0) throw new IllegalArgumentException("'timeout' must be positive and smaller than 1000000000 ms but was " + timeout); this.timeout = timeout; } /** * Sets timeout from a string which will be parsed as a */ public void setTimeout(String timeoutString) { setTimeout(ParameterParser.asMilliSeconds(timeoutString, timeout)); } /** * Resets the start time of the query. This will ensure that the query will run * for the same amount of time as a newly created query. */ public void resetTimeout() { this.startTime = System.currentTimeMillis(); } /** * Sets the context level of this query, 0 means no tracing * Higher numbers means increasingly more tracing */ public void setTraceLevel(int traceLevel) { this.traceLevel = traceLevel; } /** * Returns the context level of this query, 0 means no tracing * Higher numbers means increasingly more tracing */ public int getTraceLevel() { return traceLevel; } /** * Returns the context level of this query, 0 means no tracing * Higher numbers means increasingly more tracing */ public final boolean isTraceable(int level) { return traceLevel >= level; } /** Returns whether this query should never be served from a cache. Default is false */ public boolean getNoCache() { return noCache; } /** Sets whether this query should never be server from a cache. Default is false */ public void setNoCache(boolean noCache) { this.noCache = noCache; } /** Returns whether this query should use the grouping session cache. Default is false */ public boolean getGroupingSessionCache() { return groupingSessionCache; } /** Sets whether this query should use the grouping session cache. Default is false */ public void setGroupingSessionCache(boolean groupingSessionCache) { this.groupingSessionCache = groupingSessionCache; } /** * Returns the offset from the most relevant hits requested by the submitter * of this query. 
* Default is 0 - to return the most relevant hits */ public int getOffset() { return offset; } /** * Returns the number of hits requested by the submitter of this query. * The default is 10. */ public int getHits() { return hits; } /** * Sets the number of hits requested. If hits is less than 0, an * IllegalArgumentException is thrown. Default number of hits is 10. */ public void setHits(int hits) { if (hits < 0) throw new IllegalArgumentException("Must be a positive number"); this.hits = hits; } /** * Set the hit offset. Can not be less than 0. Default is 0. */ public void setOffset(int offset) { if (offset < 0) throw new IllegalArgumentException("Must be a positive number"); this.offset = offset; } /** Convenience method to set both the offset and the number of hits to return */ public void setWindow(int offset,int hits) { setOffset(offset); setHits(hits); } /** Returns a string describing this query */ @Override public String toString() { String queryTree; try { queryTree = model.getQueryTree().toString(); } catch (Exception e) { queryTree = "[Could not parse user input: " + model.getQueryString() + "]"; } return "query '" + queryTree + "'"; } /** Returns a string describing this query in more detail */ public String toDetailString() { String queryTree; try { queryTree = model.getQueryTree().toString(); } catch (Exception e) { queryTree = "Could not parse user input: " + model.getQueryString(); } return "query=[" + queryTree + "]" + " offset=" + getOffset() + " hits=" + getHits() + "]"; } /** * Encodes this query onto the given buffer * * @param buffer The buffer to encode the query to * @return the number of encoded items */ public int encode(ByteBuffer buffer) { return model.getQueryTree().encode(buffer); } /** * Adds a context message to this query and to the info log, * if the context level of the query is sufficiently high. * The context information will be carried over to the result at creation. 
* The message parameter will be included <i>with</i> XML escaping. * * @param message the message to add * @param traceLevel the context level of the message, this method will do nothing * if the traceLevel of the query is lower than this value */ public void trace(String message, int traceLevel) { trace(message, false, traceLevel); } /** * Adds a trace message to this query * if the trace level of the query is sufficiently high. * * @param message the message to add * @param includeQuery true to append the query root stringValue * at the end of the message * @param traceLevel the context level of the message, this method will do nothing * if the traceLevel of the query is lower than this value */ public void trace(String message, boolean includeQuery, int traceLevel) { if ( ! isTraceable(traceLevel)) return; if (includeQuery) message += ": [" + queryTreeText() + "]"; log.log(LogLevel.DEBUG,message); getContext(true).trace(message, 0); } /** * Adds a trace message to this query * if the trace level of the query is sufficiently high. * * @param includeQuery true to append the query root stringValue at the end of the message * @param traceLevel the context level of the message, this method will do nothing * if the traceLevel of the query is lower than this value * @param messages the messages whose toStrings will be concatenated into the trace message. * Concatenation will only happen if the trace level is sufficiently high. */ public void trace(boolean includeQuery, int traceLevel, Object... messages) { if ( ! isTraceable(traceLevel)) return; StringBuilder concatenated = new StringBuilder(); for (Object message : messages) concatenated.append(String.valueOf(message)); trace(concatenated.toString(), includeQuery, traceLevel); } /** * Set the context information for another query to be part of this query's * context information. This is to be used if creating fresh query objects as * part of a plug-in's execution. 
The query should be attached before it is * used, in case an exception causes premature termination. This is enforced * by an IllegalStateException. In other words, intended use is create the * new query, and attach the context to the invoking query as soon as the new * query is properly initialized. * * <p> * This method will always set the argument query's context level to the context * level of this query. * * @param query * The query which should be traced as a part of this query. * @throws IllegalStateException * If the query given as argument already has context * information. */ public void attachContext(Query query) throws IllegalStateException { query.setTraceLevel(getTraceLevel()); if (context == null) { return; } if (query.getContext(false) != null) { throw new IllegalStateException("Query to attach already has context information stored."); } query.context = context; } private String queryTreeText() { QueryTree root = getModel().getQueryTree(); if (getTraceLevel() < 2) return root.toString(); if (getTraceLevel() < 6) return yqlRepresentation(); else return "\n" + yqlRepresentation() + "\n" + new TextualQueryRepresentation(root.getRoot()) + "\n"; } /** * Serialize this query as YQL+. This method will never throw exceptions, * but instead return a human readable error message if a problem occurred while * serializing the query. Hits and offset information will be included if * different from default, while linguistics metadata are not added. 
* * @return a valid YQL+ query string or a human readable error message * @see Query */ private void commaSeparated(StringBuilder yql, Set<String> fields) { int initLen = yql.length(); for (String field : fields) { if (yql.length() > initLen) { yql.append(", "); } yql.append(field); } } /** @deprecated remove the ignored segmenterVersion argument from invocations */ @Deprecated public String yqlRepresentation(Tuple2<String, Version> segmenterVersion, boolean includeHitsAndOffset) { return yqlRepresentation(includeHitsAndOffset); } /** * Serialize this query as YQL+. This will create a string representation * which should always be legal YQL+. If a problem occurs, a * RuntimeException is thrown. * * @param includeHitsAndOffset whether to include hits and offset parameters converted to a offset/limit slice * @return a valid YQL+ query string * @throws RuntimeException if there is a problem serializing the query tree */ public String yqlRepresentation(boolean includeHitsAndOffset) { Set<String> sources = getModel().getSources(); Set<String> fields = getPresentation().getSummaryFields(); StringBuilder yql = new StringBuilder("select "); if (fields.isEmpty()) { yql.append('*'); } else { commaSeparated(yql, fields); } yql.append(" from "); if (sources.isEmpty()) { yql.append("sources *"); } else { if (sources.size() > 1) { yql.append("sources "); } commaSeparated(yql, sources); } yql.append(" where "); yql.append(VespaSerializer.serialize(this)); if (getRanking().getSorting() != null && getRanking().getSorting().fieldOrders().size() > 0) { serializeSorting(yql); } if (includeHitsAndOffset) { if (getOffset() != 0) { yql.append(" limit ").append(getHits() + getOffset()) .append(" offset ").append(getOffset()); } else if (getHits() != 10) { yql.append(" limit ").append(getHits()); } } if (getTimeout() != defaultTimeout) { yql.append(" timeout ").append(getTimeout()); } yql.append(';'); return yql.toString(); } private void serializeSorting(StringBuilder yql) { yql.append(" 
order by "); int initLen = yql.length(); for (FieldOrder f : getRanking().getSorting().fieldOrders()) { if (yql.length() > initLen) { yql.append(", "); } Class<? extends AttributeSorter> sorterType = f.getSorter().getClass(); if (sorterType == Sorting.RawSorter.class) { yql.append("[{\"") .append(YqlParser.SORTING_FUNCTION) .append("\": \"") .append(Sorting.RAW) .append("\"}]"); } else if (sorterType == Sorting.LowerCaseSorter.class) { yql.append("[{\"") .append(YqlParser.SORTING_FUNCTION) .append("\": \"") .append(Sorting.LOWERCASE) .append("\"}]"); } else if (sorterType == Sorting.UcaSorter.class) { Sorting.UcaSorter uca = (Sorting.UcaSorter) f.getSorter(); String ucaLocale = uca.getLocale(); Sorting.UcaSorter.Strength ucaStrength = uca.getStrength(); yql.append("[{\"") .append(YqlParser.SORTING_FUNCTION) .append("\": \"") .append(Sorting.UCA) .append("\""); if (ucaLocale != null) { yql.append(", \"") .append(YqlParser.SORTING_LOCALE) .append("\": \"") .append(ucaLocale) .append('"'); } if (ucaStrength != Sorting.UcaSorter.Strength.UNDEFINED) { yql.append(", \"") .append(YqlParser.SORTING_STRENGTH) .append("\": \"") .append(ucaStrength.name()) .append('"'); } yql.append("}]"); } yql.append(f.getFieldName()); if (f.getSortOrder() == Order.DESCENDING) { yql.append(" desc"); } } } /** Returns the context of this query, possibly creating it if missing. Returns the context, or null */ public QueryContext getContext(boolean create) { if (context == null && create) context = new QueryContext(getTraceLevel(),this); return context; } /** Returns a hash of this query based on (some of) its content. */ @Override public int hashCode() { return ranking.hashCode()+3*presentation.hashCode()+5* model.hashCode()+ 11*offset+ 13*hits; } /** Returns whether the given query is equal to this */ @Override public boolean equals(Object other) { if (this == other) return true; if ( ! 
(other instanceof Query)) return false; Query q = (Query) other; if (getOffset() != q.getOffset()) return false; if (getHits() != q.getHits()) return false; if ( ! getPresentation().equals(q.getPresentation())) return false; if ( ! getRanking().equals(q.getRanking())) return false; if ( ! getModel().equals(q.getModel())) return false; return true; } /** Returns a clone of this query */ @Override public Query clone() { Query clone = (Query) super.clone(); copyPropertiesTo(clone); return clone; } private void copyPropertiesTo(Query clone) { clone.model = model.cloneFor(clone); clone.select = select.cloneFor(clone); clone.ranking = (Ranking) ranking.clone(); clone.presentation = (Presentation) presentation.clone(); clone.context = getContext(true).cloneFor(clone); clone.properties().setParentQuery(clone); assert (clone.properties().getParentQuery() == clone); clone.setTraceLevel(getTraceLevel()); clone.setHits(getHits()); clone.setOffset(getOffset()); clone.setNoCache(getNoCache()); clone.setGroupingSessionCache(getGroupingSessionCache()); clone.requestId = null; } /** Returns the presentation to be used for this query, never null */ public Presentation getPresentation() { return presentation; } /** Returns the select to be used for this query, never null */ public Select getSelect() { return select; } /** Returns the ranking to be used for this query, never null */ public Ranking getRanking() { return ranking; } /** Returns the query representation model to be used for this query, never null */ public Model getModel() { return model; } /** * Return the HTTP request which caused this query. This will never be null * when running with queries from the network. 
*/ public HttpRequest getHttpRequest() { return httpRequest; } /** Returns the session id of this query, or null if none is assigned */ public SessionId getSessionId() { if (requestId == null) return null; return new SessionId(requestId, getRanking().getProfile()); } /** Returns the session id of this query, and creates and assigns it if not already present */ public SessionId getSessionId(String serverId) { if (requestId == null) requestId = UniqueRequestId.next(serverId); return new SessionId(requestId, getRanking().getProfile()); } public boolean hasEncodableProperties() { if ( ! ranking.getProperties().isEmpty()) return true; if ( ! ranking.getFeatures().isEmpty()) return true; if ( ranking.getFreshness() != null) return true; if ( model.getSearchPath() != null) return true; if ( model.getDocumentDb() != null) return true; if ( presentation.getHighlight() != null && ! presentation.getHighlight().getHighlightItems().isEmpty()) return true; return false; } /** * Encodes properties of this query. * * @param buffer the buffer to encode to * @param encodeQueryData true to encode all properties, false to only include session information, not actual query data * @return the encoded length */ public int encodeAsProperties(ByteBuffer buffer, boolean encodeQueryData) { if (! 
hasEncodableProperties()) return 0; int start = buffer.position(); int mapCountPosition = buffer.position(); buffer.putInt(0); int mapCount = 0; mapCount += ranking.getProperties().encode(buffer, encodeQueryData); if (encodeQueryData) mapCount += ranking.getFeatures().encode(buffer); if (encodeQueryData && presentation.getHighlight() != null) mapCount += MapEncoder.encodeStringMultiMap(Highlight.HIGHLIGHTTERMS, presentation.getHighlight().getHighlightTerms(), buffer); if (encodeQueryData) mapCount += MapEncoder.encodeSingleValue("model", "searchpath", model.getSearchPath(), buffer); mapCount += MapEncoder.encodeSingleValue(DocumentDatabase.MATCH_PROPERTY, DocumentDatabase.SEARCH_DOC_TYPE_KEY, model.getDocumentDb(), buffer); mapCount += MapEncoder.encodeMap("caches", createCacheSettingMap(), buffer); buffer.putInt(mapCountPosition, mapCount); return buffer.position() - start; } private Map<String, Boolean> createCacheSettingMap() { if (getGroupingSessionCache() && ranking.getQueryCache()) { Map<String, Boolean> cacheSettingMap = new HashMap<>(); cacheSettingMap.put("grouping", true); cacheSettingMap.put("query", true); return cacheSettingMap; } if (getGroupingSessionCache()) return Collections.singletonMap("grouping", true); if (ranking.getQueryCache()) return Collections.singletonMap("query", true); return Collections.<String,Boolean>emptyMap(); } /** * Prepares this for binary serialization. * <p> * This must be invoked after all changes have been made to this query before it is passed * on to a receiving backend. Calling it is somewhat expensive, so it should only happen once. * If a prepared query is cloned, it stays prepared. */ public void prepare() { getModel().prepare(getRanking()); getPresentation().prepare(); getRanking().prepare(); } }
class Query extends com.yahoo.processing.Request implements Cloneable { /** The type of the query */ public enum Type { ALL(0,"all"), ANY(1,"any"), PHRASE(2,"phrase"), ADVANCED(3,"adv"), WEB(4,"web"), PROGRAMMATIC(5, "prog"), YQL(6, "yql"), SELECT(7, "select");; private final int intValue; private final String stringValue; Type(int intValue, String stringValue) { this.intValue = intValue; this.stringValue = stringValue; } /** Converts a type argument value into a query type */ public static Type getType(String typeString) { for (Type type : Type.values()) if (type.stringValue.equals(typeString)) return type; return ALL; } public int asInt() { return intValue; } public String toString() { return stringValue; } } /** The offset from the most relevant hits found from this query */ private int offset = 0; /** The number of hits to return */ private int hits = 10; /** The query context level, 0 means no tracing */ private int traceLevel = 0; private static final long dumpTimeout = (6 * 60 * 1000); private static final long defaultTimeout = 500; /** The timeout of the query, in milliseconds */ private long timeout = defaultTimeout; /** Whether this query is forbidden to access cached information */ private boolean noCache = false; /** Whether or not grouping should use a session cache */ private boolean groupingSessionCache = false; /** The synchronous view of the JDisc request causing this query */ private final HttpRequest httpRequest; /** The context, or null if there is no context */ private QueryContext context = null; /** Used for downstream session caches */ private UniqueRequestId requestId = null; /** The ranking requested in this query */ private Ranking ranking = new Ranking(this); /** The query query and/or query program declaration */ private Model model = new Model(this); /** How results of this query should be presented */ private Presentation presentation = new Presentation(this); /** The selection of where-clause and grouping */ private Select select = 
new Select(this); private static Logger log = Logger.getLogger(Query.class.getName()); /** The time this query was created */ private long startTime; public static final CompoundName OFFSET = new CompoundName("offset"); public static final CompoundName HITS = new CompoundName("hits"); public static final CompoundName QUERY_PROFILE = new CompoundName("queryProfile"); public static final CompoundName SEARCH_CHAIN = new CompoundName("searchChain"); public static final CompoundName TRACE_LEVEL = new CompoundName("traceLevel"); public static final CompoundName NO_CACHE = new CompoundName("noCache"); public static final CompoundName GROUPING_SESSION_CACHE = new CompoundName("groupingSessionCache"); public static final CompoundName TIMEOUT = new CompoundName("timeout"); private static QueryProfileType argumentType; static { argumentType = new QueryProfileType("native"); argumentType.setBuiltin(true); argumentType.addField(new FieldDescription(OFFSET.toString(), "integer", "offset start")); argumentType.addField(new FieldDescription(HITS.toString(), "integer", "hits count")); argumentType.addField(new FieldDescription(QUERY_PROFILE.toString(), "string")); argumentType.addField(new FieldDescription(SEARCH_CHAIN.toString(), "string")); argumentType.addField(new FieldDescription(TRACE_LEVEL.toString(), "integer", "tracelevel")); argumentType.addField(new FieldDescription(NO_CACHE.toString(), "boolean", "nocache")); argumentType.addField(new FieldDescription(GROUPING_SESSION_CACHE.toString(), "boolean", "groupingSessionCache")); argumentType.addField(new FieldDescription(TIMEOUT.toString(), "string", "timeout")); argumentType.addField(new FieldDescription(FederationSearcher.SOURCENAME.toString(),"string")); argumentType.addField(new FieldDescription(FederationSearcher.PROVIDERNAME.toString(),"string")); argumentType.addField(new FieldDescription(Presentation.PRESENTATION, new QueryProfileFieldType(Presentation.getArgumentType()))); argumentType.addField(new 
FieldDescription(Ranking.RANKING, new QueryProfileFieldType(Ranking.getArgumentType()))); argumentType.addField(new FieldDescription(Model.MODEL, new QueryProfileFieldType(Model.getArgumentType()))); argumentType.addField(new FieldDescription(Select.SELECT, new QueryProfileFieldType(Select.getArgumentType()))); argumentType.freeze(); } public static QueryProfileType getArgumentType() { return argumentType; } /** The aliases of query properties */ private static Map<String,CompoundName> propertyAliases; static { Map<String,CompoundName> propertyAliasesBuilder = new HashMap<>(); addAliases(Query.getArgumentType(), propertyAliasesBuilder); addAliases(Ranking.getArgumentType(), propertyAliasesBuilder); addAliases(Model.getArgumentType(), propertyAliasesBuilder); addAliases(Presentation.getArgumentType(), propertyAliasesBuilder); addAliases(Select.getArgumentType(), propertyAliasesBuilder); propertyAliases = ImmutableMap.copyOf(propertyAliasesBuilder); } private static void addAliases(QueryProfileType arguments, Map<String, CompoundName> aliases) { String prefix = getPrefix(arguments); for (FieldDescription field : arguments.fields().values()) { for (String alias : field.getAliases()) aliases.put(alias, new CompoundName(prefix+field.getName())); } } private static String getPrefix(QueryProfileType type) { if (type.getId().getName().equals("native")) return ""; return type.getId().getName() + "."; } public static void addNativeQueryProfileTypesTo(QueryProfileTypeRegistry registry) { registry.register(Query.getArgumentType().unfrozen()); registry.register(Ranking.getArgumentType().unfrozen()); registry.register(Model.getArgumentType().unfrozen()); registry.register(Select.getArgumentType().unfrozen()); registry.register(Presentation.getArgumentType().unfrozen()); registry.register(DefaultProperties.argumentType.unfrozen()); } /** Returns an unmodifiable list of all the native properties under a Query */ public static final List<CompoundName> nativeProperties = 
ImmutableList.copyOf(namesUnder(CompoundName.empty, Query.getArgumentType())); private static List<CompoundName> namesUnder(CompoundName prefix, QueryProfileType type) { if ( type == null) return Collections.emptyList(); List<CompoundName> names = new ArrayList<>(); for (Map.Entry<String, FieldDescription> field : type.fields().entrySet()) { if (field.getValue().getType() instanceof QueryProfileFieldType) { names.addAll(namesUnder(prefix.append(field.getKey()), ((QueryProfileFieldType) field.getValue().getType()).getQueryProfileType())); } else { names.add(prefix.append(field.getKey())); } } return names; } /** * Constructs an empty (null) query */ public Query() { this(""); } /** * Construct a query from a string formatted in the http style, e.g <code>?query=test&amp;offset=10&amp;hits=13</code> * The query must be uri encoded. */ public Query(String query) { this(query, null); } /** * Creates a query from a request * * @param request the HTTP request from which this is created */ public Query(HttpRequest request) { this(request, null); } /** * Construct a query from a string formatted in the http style, e.g <code>?query=test&amp;offset=10&amp;hits=13</code> * The query must be uri encoded. */ public Query(String query, CompiledQueryProfile queryProfile) { this(HttpRequest.createTestRequest(query, com.yahoo.jdisc.http.HttpRequest.Method.GET), queryProfile); } /** * Creates a query from a request * * @param request the HTTP request from which this is created * @param queryProfile the query profile to use for this query, or null if none. */ public Query(HttpRequest request, CompiledQueryProfile queryProfile) { this(request, request.propertyMap(), queryProfile); } /** * Creates a query from a request * * @param request the HTTP request from which this is created. * @param requestMap the property map of the query. * @param queryProfile the query profile to use for this query, or null if none. 
*/ public Query(HttpRequest request, Map<String, String> requestMap, CompiledQueryProfile queryProfile) { super(new QueryPropertyAliases(propertyAliases)); this.httpRequest = request; init(requestMap, queryProfile); } private void init(Map<String, String> requestMap, CompiledQueryProfile queryProfile) { startTime = System.currentTimeMillis(); if (queryProfile != null) { Properties queryProfileProperties = new QueryProfileProperties(queryProfile); properties().chain(queryProfileProperties); setPropertiesFromRequestMap(requestMap, properties()); properties().chain(new QueryProperties(this, queryProfile.getRegistry())). chain(new ModelObjectMap()). chain(new RequestContextProperties(requestMap)). chain(queryProfileProperties). chain(new DefaultProperties()); setFieldsFrom(queryProfileProperties, requestMap); } else { properties(). chain(new QueryProperties(this, CompiledQueryProfileRegistry.empty)). chain(new PropertyMap()). chain(new DefaultProperties()); setPropertiesFromRequestMap(requestMap, properties()); } properties().setParentQuery(this); traceProperties(); } public Query(Query query) { this(query, query.getStartTime()); } private Query(Query query, long startTime) { super(query.properties().clone()); this.startTime = startTime; this.httpRequest = query.httpRequest; query.copyPropertiesTo(this); } /** * Creates a new query from another query, but with time sensitive fields reset. */ public static Query createNewQuery(Query query) { return new Query(query, System.currentTimeMillis()); } /** * Calls properties().set on each value in the given properties which is declared in this query or * one of its dependent objects. 
This will ensure the appropriate setters are called on this and all * dependent objects for the appropriate subset of the given property values */ private void setFieldsFrom(Properties properties, Map<String, String> context) { setFrom("", properties, Query.getArgumentType(), context); } /** * For each field in the given query profile type, take the corresponding value from originalProperties * (if any) set it to properties(), recursively. */ private void setFrom(String prefix, Properties originalProperties, QueryProfileType arguments, Map<String, String> context) { prefix = prefix + getPrefix(arguments); for (FieldDescription field : arguments.fields().values()) { String fullName = prefix + field.getName(); if (field.getType() == FieldType.genericQueryProfileType) { for (Map.Entry<String, Object> entry : originalProperties.listProperties(fullName, context).entrySet()) { try { properties().set(fullName + "." + entry.getKey(), entry.getValue(), context); } catch (IllegalArgumentException e) { throw new QueryException("Invalid request parameter", e); } } } else if (field.getType() instanceof QueryProfileFieldType) { setFrom(prefix, originalProperties, ((QueryProfileFieldType)field.getType()).getQueryProfileType(), context); } else { Object value = originalProperties.get(fullName, context); if (value != null) { try { properties().set(fullName, value, context); } catch (IllegalArgumentException e) { throw new QueryException("Invalid request parameter", e); } } } } } /** Calls properties.set on all entries in requestMap */ private void setPropertiesFromRequestMap(Map<String, String> requestMap, Properties properties) { for (var entry : requestMap.entrySet()) { try { properties.set(entry.getKey(), entry.getValue(), requestMap); } catch (IllegalArgumentException e) { throw new QueryException("Invalid request parameter", e); } } } /** Returns the properties of this query. 
The properties are modifiable */ @Override public Properties properties() { return (Properties)super.properties(); } /** * Traces how properties was resolved and from where. Done after the fact to avoid special handling * of tracelevel, which is the property deciding whether this needs to be done */ private void traceProperties() { if (traceLevel == 0) return; CompiledQueryProfile profile = null; QueryProfileProperties profileProperties = properties().getInstance(QueryProfileProperties.class); if (profileProperties != null) profile = profileProperties.getQueryProfile(); if (profile == null) trace("No query profile is used", false, 1); else trace("Using " + profile.toString(), false, 1); if (traceLevel < 4) return; StringBuilder b = new StringBuilder("Resolved properties:\n"); Set<String> mentioned = new HashSet<>(); for (Map.Entry<String,String> requestProperty : requestProperties().entrySet() ) { Object resolvedValue = properties().get(requestProperty.getKey(), requestProperties()); if (resolvedValue == null && requestProperty.getKey().equals("queryProfile")) resolvedValue = requestProperty.getValue(); b.append(requestProperty.getKey()); b.append("="); b.append(resolvedValue); b.append(" ("); if (profile != null && ! profile.isOverridable(new CompoundName(requestProperty.getKey()), requestProperties())) b.append("value from query profile - unoverridable, ignoring request value"); else b.append("value from request"); b.append(")\n"); mentioned.add(requestProperty.getKey()); } if (profile != null) { appendQueryProfileProperties(profile, mentioned, b); } trace(b.toString(),false,4); } private Map<String, String> requestProperties() { return httpRequest.propertyMap(); } private void appendQueryProfileProperties(CompiledQueryProfile profile,Set<String> mentioned,StringBuilder b) { for (Map.Entry<String,Object> property : profile.listValues("", requestProperties()).entrySet()) { if ( ! 
mentioned.contains(property.getKey())) b.append(property.getKey() + "=" + property.getValue() + " (value from query profile)<br/>\n"); } } /** * Validates this query * * @return the reason if it is invalid, null if it is valid */ public String validate() { QueryProfileProperties queryProfileProperties = properties().getInstance(QueryProfileProperties.class); if (queryProfileProperties == null) return null; StringBuilder missingName = new StringBuilder(); if ( ! queryProfileProperties.isComplete(missingName, httpRequest.propertyMap())) return "Incomplete query: Parameter '" + missingName + "' is mandatory in " + queryProfileProperties.getQueryProfile() + " but is not set"; else return null; } /** Returns the time (in milliseconds since epoch) when this query was started */ public long getStartTime() { return startTime; } /** Returns the time (in milliseconds) since the query was started/created */ public long getDurationTime() { return System.currentTimeMillis() - startTime; } /** * Get the appropriate timeout for the query. * * @return timeout in milliseconds */ public long getTimeLeft() { return getTimeout() - getDurationTime(); } /** * Returns the number of milliseconds to wait for a response from a search backend * before timing it out. Default is 500. * <p> * Note: If Ranking.RANKFEATURES is turned on, this is hardcoded to 6 minutes. * * @return timeout in milliseconds. */ public long getTimeout() { return properties().getBoolean(Ranking.RANKFEATURES, false) ? dumpTimeout : timeout; } /** * Sets the number of milliseconds to wait for a response from a search backend * before time out. Default is 500. 
*/ public void setTimeout(long timeout) { if (timeout > 1000000000 || timeout < 0) throw new IllegalArgumentException("'timeout' must be positive and smaller than 1000000000 ms but was " + timeout); this.timeout = timeout; } /** * Sets timeout from a string which will be parsed as a */ public void setTimeout(String timeoutString) { setTimeout(ParameterParser.asMilliSeconds(timeoutString, timeout)); } /** * Resets the start time of the query. This will ensure that the query will run * for the same amount of time as a newly created query. */ public void resetTimeout() { this.startTime = System.currentTimeMillis(); } /** * Sets the context level of this query, 0 means no tracing * Higher numbers means increasingly more tracing */ public void setTraceLevel(int traceLevel) { this.traceLevel = traceLevel; } /** * Returns the context level of this query, 0 means no tracing * Higher numbers means increasingly more tracing */ public int getTraceLevel() { return traceLevel; } /** * Returns the context level of this query, 0 means no tracing * Higher numbers means increasingly more tracing */ public final boolean isTraceable(int level) { return traceLevel >= level; } /** Returns whether this query should never be served from a cache. Default is false */ public boolean getNoCache() { return noCache; } /** Sets whether this query should never be server from a cache. Default is false */ public void setNoCache(boolean noCache) { this.noCache = noCache; } /** Returns whether this query should use the grouping session cache. Default is false */ public boolean getGroupingSessionCache() { return groupingSessionCache; } /** Sets whether this query should use the grouping session cache. Default is false */ public void setGroupingSessionCache(boolean groupingSessionCache) { this.groupingSessionCache = groupingSessionCache; } /** * Returns the offset from the most relevant hits requested by the submitter * of this query. 
* Default is 0 - to return the most relevant hits */ public int getOffset() { return offset; } /** * Returns the number of hits requested by the submitter of this query. * The default is 10. */ public int getHits() { return hits; } /** * Sets the number of hits requested. If hits is less than 0, an * IllegalArgumentException is thrown. Default number of hits is 10. */ public void setHits(int hits) { if (hits < 0) throw new IllegalArgumentException("Must be a positive number"); this.hits = hits; } /** * Set the hit offset. Can not be less than 0. Default is 0. */ public void setOffset(int offset) { if (offset < 0) throw new IllegalArgumentException("Must be a positive number"); this.offset = offset; } /** Convenience method to set both the offset and the number of hits to return */ public void setWindow(int offset,int hits) { setOffset(offset); setHits(hits); } /** Returns a string describing this query */ @Override public String toString() { String queryTree; try { queryTree = model.getQueryTree().toString(); } catch (Exception e) { queryTree = "[Could not parse user input: " + model.getQueryString() + "]"; } return "query '" + queryTree + "'"; } /** Returns a string describing this query in more detail */ public String toDetailString() { String queryTree; try { queryTree = model.getQueryTree().toString(); } catch (Exception e) { queryTree = "Could not parse user input: " + model.getQueryString(); } return "query=[" + queryTree + "]" + " offset=" + getOffset() + " hits=" + getHits() + "]"; } /** * Encodes this query onto the given buffer * * @param buffer The buffer to encode the query to * @return the number of encoded items */ public int encode(ByteBuffer buffer) { return model.getQueryTree().encode(buffer); } /** * Adds a context message to this query and to the info log, * if the context level of the query is sufficiently high. * The context information will be carried over to the result at creation. 
* The message parameter will be included <i>with</i> XML escaping. * * @param message the message to add * @param traceLevel the context level of the message, this method will do nothing * if the traceLevel of the query is lower than this value */ public void trace(String message, int traceLevel) { trace(message, false, traceLevel); } /** * Adds a trace message to this query * if the trace level of the query is sufficiently high. * * @param message the message to add * @param includeQuery true to append the query root stringValue * at the end of the message * @param traceLevel the context level of the message, this method will do nothing * if the traceLevel of the query is lower than this value */ public void trace(String message, boolean includeQuery, int traceLevel) { if ( ! isTraceable(traceLevel)) return; if (includeQuery) message += ": [" + queryTreeText() + "]"; log.log(LogLevel.DEBUG,message); getContext(true).trace(message, 0); } /** * Adds a trace message to this query * if the trace level of the query is sufficiently high. * * @param includeQuery true to append the query root stringValue at the end of the message * @param traceLevel the context level of the message, this method will do nothing * if the traceLevel of the query is lower than this value * @param messages the messages whose toStrings will be concatenated into the trace message. * Concatenation will only happen if the trace level is sufficiently high. */ public void trace(boolean includeQuery, int traceLevel, Object... messages) { if ( ! isTraceable(traceLevel)) return; StringBuilder concatenated = new StringBuilder(); for (Object message : messages) concatenated.append(String.valueOf(message)); trace(concatenated.toString(), includeQuery, traceLevel); } /** * Set the context information for another query to be part of this query's * context information. This is to be used if creating fresh query objects as * part of a plug-in's execution. 
The query should be attached before it is * used, in case an exception causes premature termination. This is enforced * by an IllegalStateException. In other words, intended use is create the * new query, and attach the context to the invoking query as soon as the new * query is properly initialized. * * <p> * This method will always set the argument query's context level to the context * level of this query. * * @param query * The query which should be traced as a part of this query. * @throws IllegalStateException * If the query given as argument already has context * information. */ public void attachContext(Query query) throws IllegalStateException { query.setTraceLevel(getTraceLevel()); if (context == null) { return; } if (query.getContext(false) != null) { throw new IllegalStateException("Query to attach already has context information stored."); } query.context = context; } private String queryTreeText() { QueryTree root = getModel().getQueryTree(); if (getTraceLevel() < 2) return root.toString(); if (getTraceLevel() < 6) return yqlRepresentation(); else return "\n" + yqlRepresentation() + "\n" + new TextualQueryRepresentation(root.getRoot()) + "\n"; } /** * Serialize this query as YQL+. This method will never throw exceptions, * but instead return a human readable error message if a problem occurred while * serializing the query. Hits and offset information will be included if * different from default, while linguistics metadata are not added. 
* * @return a valid YQL+ query string or a human readable error message * @see Query */ private void commaSeparated(StringBuilder yql, Set<String> fields) { int initLen = yql.length(); for (String field : fields) { if (yql.length() > initLen) { yql.append(", "); } yql.append(field); } } /** @deprecated remove the ignored segmenterVersion argument from invocations */ @Deprecated public String yqlRepresentation(Tuple2<String, Version> segmenterVersion, boolean includeHitsAndOffset) { return yqlRepresentation(includeHitsAndOffset); } /** * Serialize this query as YQL+. This will create a string representation * which should always be legal YQL+. If a problem occurs, a * RuntimeException is thrown. * * @param includeHitsAndOffset whether to include hits and offset parameters converted to a offset/limit slice * @return a valid YQL+ query string * @throws RuntimeException if there is a problem serializing the query tree */ public String yqlRepresentation(boolean includeHitsAndOffset) { Set<String> sources = getModel().getSources(); Set<String> fields = getPresentation().getSummaryFields(); StringBuilder yql = new StringBuilder("select "); if (fields.isEmpty()) { yql.append('*'); } else { commaSeparated(yql, fields); } yql.append(" from "); if (sources.isEmpty()) { yql.append("sources *"); } else { if (sources.size() > 1) { yql.append("sources "); } commaSeparated(yql, sources); } yql.append(" where "); yql.append(VespaSerializer.serialize(this)); if (getRanking().getSorting() != null && getRanking().getSorting().fieldOrders().size() > 0) { serializeSorting(yql); } if (includeHitsAndOffset) { if (getOffset() != 0) { yql.append(" limit ").append(getHits() + getOffset()) .append(" offset ").append(getOffset()); } else if (getHits() != 10) { yql.append(" limit ").append(getHits()); } } if (getTimeout() != defaultTimeout) { yql.append(" timeout ").append(getTimeout()); } yql.append(';'); return yql.toString(); } private void serializeSorting(StringBuilder yql) { yql.append(" 
order by "); int initLen = yql.length(); for (FieldOrder f : getRanking().getSorting().fieldOrders()) { if (yql.length() > initLen) { yql.append(", "); } Class<? extends AttributeSorter> sorterType = f.getSorter().getClass(); if (sorterType == Sorting.RawSorter.class) { yql.append("[{\"") .append(YqlParser.SORTING_FUNCTION) .append("\": \"") .append(Sorting.RAW) .append("\"}]"); } else if (sorterType == Sorting.LowerCaseSorter.class) { yql.append("[{\"") .append(YqlParser.SORTING_FUNCTION) .append("\": \"") .append(Sorting.LOWERCASE) .append("\"}]"); } else if (sorterType == Sorting.UcaSorter.class) { Sorting.UcaSorter uca = (Sorting.UcaSorter) f.getSorter(); String ucaLocale = uca.getLocale(); Sorting.UcaSorter.Strength ucaStrength = uca.getStrength(); yql.append("[{\"") .append(YqlParser.SORTING_FUNCTION) .append("\": \"") .append(Sorting.UCA) .append("\""); if (ucaLocale != null) { yql.append(", \"") .append(YqlParser.SORTING_LOCALE) .append("\": \"") .append(ucaLocale) .append('"'); } if (ucaStrength != Sorting.UcaSorter.Strength.UNDEFINED) { yql.append(", \"") .append(YqlParser.SORTING_STRENGTH) .append("\": \"") .append(ucaStrength.name()) .append('"'); } yql.append("}]"); } yql.append(f.getFieldName()); if (f.getSortOrder() == Order.DESCENDING) { yql.append(" desc"); } } } /** Returns the context of this query, possibly creating it if missing. Returns the context, or null */ public QueryContext getContext(boolean create) { if (context == null && create) context = new QueryContext(getTraceLevel(),this); return context; } /** Returns a hash of this query based on (some of) its content. */ @Override public int hashCode() { return ranking.hashCode()+3*presentation.hashCode()+5* model.hashCode()+ 11*offset+ 13*hits; } /** Returns whether the given query is equal to this */ @Override public boolean equals(Object other) { if (this == other) return true; if ( ! 
(other instanceof Query)) return false; Query q = (Query) other; if (getOffset() != q.getOffset()) return false; if (getHits() != q.getHits()) return false; if ( ! getPresentation().equals(q.getPresentation())) return false; if ( ! getRanking().equals(q.getRanking())) return false; if ( ! getModel().equals(q.getModel())) return false; return true; } /** Returns a clone of this query */ @Override public Query clone() { Query clone = (Query) super.clone(); copyPropertiesTo(clone); return clone; } private void copyPropertiesTo(Query clone) { clone.model = model.cloneFor(clone); clone.select = select.cloneFor(clone); clone.ranking = (Ranking) ranking.clone(); clone.presentation = (Presentation) presentation.clone(); clone.context = getContext(true).cloneFor(clone); clone.properties().setParentQuery(clone); assert (clone.properties().getParentQuery() == clone); clone.setTraceLevel(getTraceLevel()); clone.setHits(getHits()); clone.setOffset(getOffset()); clone.setNoCache(getNoCache()); clone.setGroupingSessionCache(getGroupingSessionCache()); clone.requestId = null; } /** Returns the presentation to be used for this query, never null */ public Presentation getPresentation() { return presentation; } /** Returns the select to be used for this query, never null */ public Select getSelect() { return select; } /** Returns the ranking to be used for this query, never null */ public Ranking getRanking() { return ranking; } /** Returns the query representation model to be used for this query, never null */ public Model getModel() { return model; } /** * Return the HTTP request which caused this query. This will never be null * when running with queries from the network. 
*/ public HttpRequest getHttpRequest() { return httpRequest; } /** Returns the session id of this query, or null if none is assigned */ public SessionId getSessionId() { if (requestId == null) return null; return new SessionId(requestId, getRanking().getProfile()); } /** Returns the session id of this query, and creates and assigns it if not already present */ public SessionId getSessionId(String serverId) { if (requestId == null) requestId = UniqueRequestId.next(serverId); return new SessionId(requestId, getRanking().getProfile()); } public boolean hasEncodableProperties() { if ( ! ranking.getProperties().isEmpty()) return true; if ( ! ranking.getFeatures().isEmpty()) return true; if ( ranking.getFreshness() != null) return true; if ( model.getSearchPath() != null) return true; if ( model.getDocumentDb() != null) return true; if ( presentation.getHighlight() != null && ! presentation.getHighlight().getHighlightItems().isEmpty()) return true; return false; } /** * Encodes properties of this query. * * @param buffer the buffer to encode to * @param encodeQueryData true to encode all properties, false to only include session information, not actual query data * @return the encoded length */ public int encodeAsProperties(ByteBuffer buffer, boolean encodeQueryData) { if (! 
hasEncodableProperties()) return 0; int start = buffer.position(); int mapCountPosition = buffer.position(); buffer.putInt(0); int mapCount = 0; mapCount += ranking.getProperties().encode(buffer, encodeQueryData); if (encodeQueryData) mapCount += ranking.getFeatures().encode(buffer); if (encodeQueryData && presentation.getHighlight() != null) mapCount += MapEncoder.encodeStringMultiMap(Highlight.HIGHLIGHTTERMS, presentation.getHighlight().getHighlightTerms(), buffer); if (encodeQueryData) mapCount += MapEncoder.encodeSingleValue("model", "searchpath", model.getSearchPath(), buffer); mapCount += MapEncoder.encodeSingleValue(DocumentDatabase.MATCH_PROPERTY, DocumentDatabase.SEARCH_DOC_TYPE_KEY, model.getDocumentDb(), buffer); mapCount += MapEncoder.encodeMap("caches", createCacheSettingMap(), buffer); buffer.putInt(mapCountPosition, mapCount); return buffer.position() - start; } private Map<String, Boolean> createCacheSettingMap() { if (getGroupingSessionCache() && ranking.getQueryCache()) { Map<String, Boolean> cacheSettingMap = new HashMap<>(); cacheSettingMap.put("grouping", true); cacheSettingMap.put("query", true); return cacheSettingMap; } if (getGroupingSessionCache()) return Collections.singletonMap("grouping", true); if (ranking.getQueryCache()) return Collections.singletonMap("query", true); return Collections.<String,Boolean>emptyMap(); } /** * Prepares this for binary serialization. * <p> * This must be invoked after all changes have been made to this query before it is passed * on to a receiving backend. Calling it is somewhat expensive, so it should only happen once. * If a prepared query is cloned, it stays prepared. */ public void prepare() { getModel().prepare(getRanking()); getPresentation().prepare(); getRanking().prepare(); } }
Remove `getParameters()` method
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = ZoneId.from(environment, region); DeploymentId deployment = new DeploymentId(application, zone); if (queryParameters.containsKey("streaming")) { InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { logStream.transferTo(outputStream); } }; } Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters); Slime slime = new Slime(); Cursor object = slime.setObject(); if (response.isPresent()) { response.get().logs().entrySet().stream().forEach(entry -> object.setString(entry.getKey(), entry.getValue())); } return new SlimeJsonResponse(slime); }
DeploymentId deployment = new DeploymentId(application, zone);
/**
 * Fetches the logs for one deployment: either piped straight through from the config
 * server (when the "streaming" parameter is set) or rendered as a JSON object whose
 * fields are the individual log entries.
 */
private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zoneId = ZoneId.from(environment, region);
    DeploymentId deploymentId = new DeploymentId(applicationId, zoneId);
    boolean streaming = queryParameters.containsKey("streaming");
    if (streaming) {
        // Stream the config server's log output directly to the client.
        InputStream stream = controller.configServer().getLogStream(deploymentId, queryParameters);
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream out) throws IOException {
                stream.transferTo(out);
            }
        };
    }
    Optional<Logs> logs = controller.configServer().getLogs(deploymentId, queryParameters);
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (logs.isPresent()) {
        for (Map.Entry<String, String> entry : logs.get().logs().entrySet())
            root.setString(entry.getKey(), entry.getValue());
    }
    return new SlimeJsonResponse(slime);
}
class ApplicationApiHandler extends LoggingRequestHandler { private final Controller controller; private final ZmsClientFacade zmsClient; @Inject public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, AthenzClientFactory athenzClientFactory) { super(parentCtx); this.controller = controller; this.zmsClient = new ZmsClientFacade(athenzClientFactory.createZmsClient(), athenzClientFactory.getControllerIdentity()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return handlePOST(request); case PATCH: return handlePATCH(request); case DELETE: return handleDELETE(request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return ErrorResponse.from(e); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant-pipeline")) return 
tenantPipelines(); if (path.matches("/application/v4/athensDomain")) return athenzDomains(request); if (path.matches("/application/v4/property")) return properties(); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if 
(path.matches("/application/v4/user")) return createUser(request); if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), readToString(request.getData()), false); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), readToString(request.getData()), true); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return setMajorVersion(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri().getPath()); 
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyJsonResponse response = new EmptyJsonResponse(); response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); 
Cursor tenantArray = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) toSlime(tenantArray.addObject(), tenant, request, true); return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return recurseOverTenants(request) ? recursiveRoot(request) : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property"); } private HttpResponse authenticatedUser(HttpRequest request) { String userIdString = request.getProperty("userOverride"); if (userIdString == null) userIdString = getUserId(request) .map(UserId::id) .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride")); UserId userId = new UserId(userIdString); List<Tenant> tenants = controller.tenants().asList(userId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setString("user", userId.id()); Cursor tenantsArray = response.setArray("tenants"); for (Tenant tenant : tenants) tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()); response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant instanceof UserTenant && ((UserTenant) tenant).is(userId.id()))); return new SlimeJsonResponse(slime); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } /** Lists the screwdriver project id for each application */ private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! 
application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } private HttpResponse athenzDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athenzDomain.getName()); } return new SlimeJsonResponse(slime); } private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().tenant(TenantName.from(tenantName)) .map(tenant -> tenant(tenant, request, true)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, request, listApplications); return new SlimeJsonResponse(slime); } private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array = 
slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) { Application application = getApplication(tenantName, applicationName); Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion"); if ( ! majorVersionField.valid()) throw new IllegalArgumentException("Request body must contain a majorVersion field"); Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int)majorVersionField.asLong(); controller.applications().lockIfPresent(application.id(), a -> controller.applications().store(a.withMajorVersion(majorVersion))); return new MessageResponse("Set major version to " + ( majorVersion == null ? "empty" : majorVersion)); } private Application getApplication(String tenantName, String applicationName) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default"); return controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { String triggered = controller.applications().deploymentTrigger() .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName()) .stream().map(JobType::jobName).collect(joining(", ")); return new MessageResponse(triggered.isEmpty() ? 
"Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id); } private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); } private HashMap<String, String> getParameters(String query) { HashMap<String, String> keyValPair = new HashMap<>(); Arrays.stream(query.split("&")).forEach(pair -> { String[] splitPair = pair.split("="); keyValPair.put(splitPair[0], splitPair[1]); }); return keyValPair; } private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + application.id().instance().value() + "/job/", request.getUri()).toString()); application.deploymentJobs().statusOf(JobType.component) .flatMap(JobStatus::lastSuccess) .map(run -> run.application().source()) .ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source"))); application.deploymentJobs().projectId() .ifPresent(id -> object.setLong("projectId", id)); if ( ! application.change().isEmpty()) { toSlime(object.setObject("deploying"), application.change()); } if ( ! 
application.outstandingChange().isEmpty()) { toSlime(object.setObject("outstandingChange"), application.outstandingChange()); } List<JobStatus> jobStatus = controller.applications().deploymentTrigger() .steps(application.deploymentSpec()) .sortedJobs(application.deploymentJobs().jobStatus().values()); object.setBool("deployedInternally", application.deploymentJobs().deployedInternally()); Cursor deploymentsArray = object.setArray("deploymentJobs"); for (JobStatus job : jobStatus) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().jobName()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); }); object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString()); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor globalRotationsArray = object.setArray("globalRotations"); 
application.globalDnsName(controller.system()).ifPresent(rotation -> { globalRotationsArray.addString(rotation.url().toString()); globalRotationsArray.addString(rotation.secureUrl().toString()); globalRotationsArray.addString(rotation.oathUrl().toString()); object.setString("rotationId", application.rotation().get().asString()); }); Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id()); for (RoutingPolicy policy : routingPolicies) { for (RotationName rotation : policy.rotations()) { GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation); globalRotationsArray.addString(dnsName.oathUrl().toString()); } } List<Deployment> deployments = controller.applications().deploymentTrigger() .steps(application.deploymentSpec()) .sortedDeployments(application.deployments().values()); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) { toSlime(application.rotationStatus(deployment), deploymentObject); } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request); else deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); 
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region)); Deployment deployment = application.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Change change) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.application() .filter(version -> !version.isUnknown()) .ifPresent(version -> toSlime(version, object.setObject("revision"))); } private void 
toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { Cursor serviceUrlArray = response.setArray("serviceUrls"); controller.applications().getDeploymentEndpoints(deploymentId) .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString()))); response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); controller.zoneRegistry().getLogServerUri(deploymentId) .ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString())); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.applicationVersion().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli())); controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId() .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.applicationVersion().source(), response); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> 
activity.setDouble("lastWritesPerSecond", value)); DeploymentCost appCost = deployment.calculateCost(); Cursor costObject = response.setObject("cost"); toSlime(appCost, costObject); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (!applicationVersion.isUnknown()) { object.setString("hash", applicationVersion.id()); sourceRevisionToSlime(applicationVersion.source(), object.setObject("source")); } } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! 
// Tail of a toSlime overload whose signature is on a previous source line:
// serializes git metadata when a revision is present.
revision.isPresent()) return;
    object.setString("gitRepository", revision.get().repository());
    object.setString("gitBranch", revision.get().branch());
    object.setString("gitCommit", revision.get().commit());
}

/** Serializes rotation status as "bcpStatus": { "rotationStatus": "<STATUS>" }. */
private void toSlime(RotationStatus status, Cursor object) {
    Cursor bcpStatus = object.setObject("bcpStatus");
    bcpStatus.setString("rotationStatus", status.name().toUpperCase());
}

/** Returns the monitoring system URI for the given deployment, resolved via the zone registry. */
private URI monitoringSystemUri(DeploymentId deploymentId) {
    return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}

/**
 * Takes a deployment in or out of its global rotation.
 * Requires a JSON body with a mandatory "reason"; records the acting user and current epoch second.
 * Throws NotExistsException when the application has no deployment in the given zone.
 */
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName,
                                               String environment, String region, boolean inService,
                                               HttpRequest request) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    ZoneId zone = ZoneId.from(environment, region);
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(application + " has no deployment in " + zone);
    }
    Inspector requestData = toSlime(request.getData()).get();
    String reason = mandatory("reason", requestData).asString();
    String agent = getUserPrincipal(request).getIdentity().getFullName();
    long timestamp = controller.clock().instant().getEpochSecond();
    EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
    EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp);
    controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()), endpointStatus);
    return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                             application.id().toShortString(),
                                             deployment.zone().environment().value(),
                                             deployment.zone().region().value(),
                                             inService ? "in" : "out of"));
}

/**
 * Returns the current endpoint status per routing endpoint of the deployment, as a
 * "globalrotationoverride" array of alternating upstream-name strings and status objects.
 */
private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName,
                                               String environment, String region) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    Slime slime = new Slime();
    Cursor array = slime.setObject().setArray("globalrotationoverride");
    Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId);
    for (RoutingEndpoint endpoint : status.keySet()) {
        EndpointStatus currentStatus = status.get(endpoint);
        array.addString(endpoint.upstreamName());
        Cursor statusObject = array.addObject();
        statusObject.setString("status", currentStatus.getStatus().name());
        // Reason and agent may be null on the stored status; serialize as empty strings.
        statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
        statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
        statusObject.setLong("timestamp", currentStatus.getEpoch());
    }
    return new SlimeJsonResponse(slime);
}

/**
 * Returns the rotation (BCP) status of the given deployment.
 * Throws NotExistsException when the application has no global rotation or no deployment in the zone.
 */
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName,
                                    String environment, String region) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    Application application = controller.applications().require(applicationId);
    ZoneId zone = ZoneId.from(environment, region);
    if (!application.rotation().isPresent()) {
        throw new NotExistsException("global rotation does not exist for " + application);
    }
    Deployment deployment = application.deployments().get(zone);
    if (deployment == null) {
        throw new NotExistsException(application + " has no deployment in " + zone);
    }
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    toSlime(application.rotationStatus(deployment), response);
    return new SlimeJsonResponse(slime);
}

/** Returns the change (platform and/or application version, and pin state) currently rolling out, if any. */
private HttpResponse deploying(String tenant, String application, HttpRequest request) {
    Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
    Slime slime = new Slime();
    Cursor root = slime.setObject();
    if (!app.change().isEmpty()) {
        app.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
        app.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
        root.setBool("pinned", app.change().isPinned());
    }
    return new SlimeJsonResponse(slime);
}

/** Returns { "suspended": <bool> } for the given deployment. */
private HttpResponse suspended(String tenantName, String applicationName, String instanceName,
                               String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    boolean suspended = controller.applications().isSuspended(deploymentId);
    Slime slime = new Slime();
    Cursor response = slime.setObject();
    response.setBool("suspended", suspended);
    return new SlimeJsonResponse(slime);
}

/** Lists the services of a deployment, as reported by the config servers for the zone. */
private HttpResponse services(String tenantName, String applicationName, String instanceName,
                              String environment, String region, HttpRequest request) {
    ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
    ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
                                                        new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                        controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)),
                                                        request.getUri());
    response.setResponse(applicationView);
    return response;
}

/** Proxies a request for a single service's API (restPath) through the config servers. */
private HttpResponse service(String tenantName, String applicationName, String instanceName,
                             String environment, String region, String serviceName, String restPath,
                             HttpRequest request) {
    Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
    ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
                                                        new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                        controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)),
                                                        request.getUri());
    response.setResponse(result, serviceName, restPath);
    return response;
}

/**
 * Creates a user tenant for the authenticated user; idempotent — responds with a
 * message rather than an error if the user tenant already exists.
 */
private HttpResponse createUser(HttpRequest request) {
    Optional<UserId> user = getUserId(request);
    if ( ! user.isPresent() ) throw new ForbiddenException("Not authenticated or not an user.");
    String username = UserTenant.normalizeUser(user.get().id());
    try {
        controller.tenants().create(UserTenant.create(username));
        return new MessageResponse("Created user '" + username + "'");
    } catch (AlreadyExistsException e) {
        return new MessageResponse("User '" + username + "' already exists");
    }
}

/**
 * Updates an Athenz tenant's property, domain and (optionally) property id under the tenant lock.
 * Requires an Okta access token; "property" and "athensDomain" are mandatory in the request body.
 */
private HttpResponse updateTenant(String tenantName, HttpRequest request) {
    Optional<AthenzTenant> tenant = controller.tenants().athenzTenant(TenantName.from(tenantName));
    if ( ! tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist");
    Inspector requestData = toSlime(request.getData()).get();
    OktaAccessToken token = requireOktaAccessToken(request, "Could not update " + tenantName);
    controller.tenants().lockOrThrow(tenant.get().name(), lockedTenant -> {
        lockedTenant = lockedTenant.with(new Property(mandatory("property", requestData).asString()));
        lockedTenant = controller.tenants().withDomain(lockedTenant,
                                                       new AthenzDomain(mandatory("athensDomain", requestData).asString()),
                                                       token);
        Optional<PropertyId> propertyId = optional("propertyId", requestData).map(PropertyId::new);
        if (propertyId.isPresent()) {
            lockedTenant = lockedTenant.with(propertyId.get());
        }
        controller.tenants().store(lockedTenant);
    });
    return tenant(controller.tenants().requireAthenzTenant(tenant.get().name()), request, true);
}

/**
 * Creates a new Athenz tenant. The requesting user must be admin of the Athenz domain,
 * and an Okta access token must be present.
 */
private HttpResponse createTenant(String tenantName, HttpRequest request) {
    Inspector requestData = toSlime(request.getData()).get();
    AthenzTenant tenant = AthenzTenant.create(TenantName.from(tenantName),
                                              new AthenzDomain(mandatory("athensDomain", requestData).asString()),
                                              new Property(mandatory("property", requestData).asString()),
                                              optional("propertyId", requestData).map(PropertyId::new));
    throwIfNotAthenzDomainAdmin(tenant.domain(), request);
    controller.tenants().create(tenant, requireOktaAccessToken(request, "Could not create " + tenantName));
    return tenant(tenant, request, true);
}

/**
 * Creates the "default" instance of an application under the given tenant.
 * Maps a ZMS FORBIDDEN error to HTTP 403; other ZMS errors are rethrown.
 */
private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) {
    Application application;
    try {
        application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"),
                                                                  getOktaAccessToken(request));
    } catch (ZmsClientException e) {
        if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN)
            throw new ForbiddenException("Not authorized to create application", e);
        else
            throw e;
    }
    Slime slime = new Slime();
    toSlime(application, slime.setObject(), request);
    return new SlimeJsonResponse(slime);
}

/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String versionString, boolean pin) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Version version = Version.fromString(versionString);
        // An empty version means "deploy the current system version".
        if (version.equals(Version.emptyVersion)) version = controller.systemVersion();
        if ( ! systemHasVersion(version))
            throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                               "Version is not active in this system. " +
                                               "Active versions: " + controller.versionStatus().versions()
                                                                               .stream()
                                                                               .map(VespaVersion::versionNumber)
                                                                               .map(Version::toString)
                                                                               .collect(joining(", ")));
        Change change = Change.of(version);
        if (pin) change = change.withPin();
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}

/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        // NOTE(review): the chained .get() calls assume a successful component run exists — verify callers.
        Change change = Change.of(application.get().deploymentJobs().statusOf(JobType.component).get().lastSuccess().get().application());
        controller.applications().deploymentTrigger().forceChange(id, change);
        response.append("Triggered " + change + " for " + id);
    });
    return new MessageResponse(response.toString());
}

/** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    StringBuilder response = new StringBuilder();
    controller.applications().lockOrThrow(id, application -> {
        Change change = application.get().change();
        if (change.isEmpty()) {
            response.append("No deployment in progress for " + application + " at this time");
            return;
        }
        ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
        controller.applications().deploymentTrigger().cancelChange(id, cancel);
        response.append("Changed deployment from '" + change + "' to '" +
                        controller.applications().require(id).change() + "' for " + application);
    });
    return new MessageResponse(response.toString());
}

/** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName,
                             String environment, String region, HttpRequest request) {
    DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                 ZoneId.from(environment, region));
    // Optional "hostname" query property narrows the restart to a single host.
    Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
    controller.applications().restart(deploymentId, hostname);
    return new
StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
                                              ApplicationResource.API_PATH, applicationName,
                                              EnvironmentResource.API_PATH, environment,
                                              "region", region,
                                              "instance", instanceName));
}

/**
 * Deploys an application package to a zone. The multipart body must contain "deployOptions"
 * (JSON) and may contain "applicationZip". Handles three cases: the system zone application,
 * a package resolved from sourceRevision+buildNumber, and direct redeployment of the
 * currently deployed version.
 */
private HttpResponse deploy(String tenantName, String applicationName, String instanceName,
                            String environment, String region, HttpRequest request) {
    ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
    ZoneId zone = ZoneId.from(environment, region);
    Map<String, byte[]> dataParts = new MultipartParser().parse(request);
    if ( ! dataParts.containsKey("deployOptions"))
        return ErrorResponse.badRequest("Missing required form part 'deployOptions'");
    Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();
    /*
     * Special handling of the zone application (the only system application with an application package)
     * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
     * this might be handy later to handle emergency downgrades.
     */
    boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
    if (isZoneApplication) {
        // Refuse an explicit version for system applications; they always follow the system version.
        String versionStr = deployOptions.field("vespaVersion").asString();
        boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
        if (versionPresent) {
            throw new RuntimeException("Version not supported for system applications");
        }
        if (controller.versionStatus().isUpgrading()) {
            throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
        }
        Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
        if (systemVersion.isEmpty()) {
            throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
        }
        ActivateResult result = controller.applications()
                                          .deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
        return new SlimeJsonResponse(toSlime(result));
    }
    /*
     * Normal applications from here
     */
    Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                              .map(ApplicationPackage::new);
    // sourceRevision and buildNumber identify a previously built package; they must come together.
    Inspector sourceRevision = deployOptions.field("sourceRevision");
    Inspector buildNumber = deployOptions.field("buildNumber");
    if (sourceRevision.valid() != buildNumber.valid())
        throw new IllegalArgumentException("Source revision and build number must both be provided, or not");
    Optional<ApplicationVersion> applicationVersion = Optional.empty();
    if (sourceRevision.valid()) {
        if (applicationPackage.isPresent())
            throw new IllegalArgumentException("Application version and application package can't both be provided.");
        applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision), buildNumber.asLong()));
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
    }
    boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
    Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);
    /*
     * Deploy direct is when we want to redeploy the current application - retrieve version
     * info from the application package before deploying
     */
    if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
        Optional<Deployment> deployment = controller.applications().get(applicationId)
                                                    .map(Application::deployments)
                                                    .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));
        if(!deployment.isPresent())
            throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");
        ApplicationVersion version = deployment.get().applicationVersion();
        if(version.isUnknown())
            throw new IllegalArgumentException("Can't redeploy application, application version is unknown");
        applicationVersion = Optional.of(version);
        vespaVersion = Optional.of(deployment.get().version());
        applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
    }
    DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                             vespaVersion,
                                                             deployOptions.field("ignoreValidationErrors").asBool(),
                                                             deployOptions.field("deployCurrentVersion").asBool());
    ActivateResult result = controller.applications().deploy(applicationId,
                                                             zone,
                                                             applicationPackage,
                                                             applicationVersion,
                                                             deployOptionsJsonClass,
                                                             Optional.of(getUserPrincipal(request).getIdentity()));
    return new SlimeJsonResponse(toSlime(result));
}

/**
 * Deletes a tenant. Athenz tenants require an Okta access token; user tenants do not.
 * Returns the serialized tenant (without its applications) as the response body.
 */
private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
    Optional<Tenant> tenant = controller.tenants().tenant(tenantName);
    if ( ! tenant.isPresent())
        return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found");
    if (tenant.get() instanceof AthenzTenant) {
        controller.tenants().deleteTenant((AthenzTenant) tenant.get(),
                                          requireOktaAccessToken(request, "Could not delete " + tenantName));
    } else if (tenant.get() instanceof UserTenant) {
        controller.tenants().deleteTenant((UserTenant) tenant.get());
    } else {
        throw new IllegalArgumentException("Unknown tenant type:" + tenant.get().getClass().getSimpleName() +
                                           ", for " + tenant.get());
    }
    return tenant(tenant.get(), request, false);
}

/** Deletes the "default" instance of the given application. */
private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) {
    ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
    controller.applications().deleteApplication(id, getOktaAccessToken(request));
    return new EmptyJsonResponse();
}

/** Deactivates (removes) the given deployment of an application. */
private HttpResponse deactivate(String tenantName, String applicationName, String instanceName,
                                String environment, String region, HttpRequest request) {
    Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
    controller.applications().deactivate(application.id(), ZoneId.from(environment, region));
    return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName,
                                                    ApplicationResource.API_PATH, applicationName,
                                                    EnvironmentResource.API_PATH, environment,
                                                    "region", region,
                                                    "instance", instanceName));
}

/**
 * Promote application Chef environments. To be used by component jobs only
 */
private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String sourceEnvironment = chefEnvironment.systemChefEnvironment();
        String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
    } catch (Exception e) {
        // Best-effort: Chef promotion failures are logged and reported, not propagated.
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}

/**
 * Promote application Chef environments for jobs that deploy applications
 */
private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName,
                                                  String regionName, String instanceName, HttpRequest request) {
    try {
        ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
        String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
        String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName),
                                                                                Environment.from(environmentName), RegionName.from(regionName));
        controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
        return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
    } catch (Exception e) {
        // Best-effort: Chef promotion failures are logged and reported, not propagated.
        log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
        return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
    }
}

/**
 * Receives a job completion report. Component reports are rejected for applications
 * that have migrated to internal deployment; IllegalStateException maps to HTTP 400.
 */
private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
    try {
        DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
        if ( report.jobType() == JobType.component && controller.applications().require(report.applicationId())
                                                                .deploymentJobs().deployedInternally())
            throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
                                               "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
                                               "to the old pipeline, please file a ticket at yo/vespa-support and request this.");
        controller.applications().deploymentTrigger().notifyOfCompletion(report);
        return new MessageResponse("ok");
    } catch (IllegalStateException e) {
        return ErrorResponse.badRequest(Exceptions.toMessageString(e));
    }
}

/** Parses a job report from its JSON form; component jobs additionally carry projectId and sourceRevision. */
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
    Optional<DeploymentJobs.JobError> jobError = Optional.empty();
    if (report.field("jobError").valid()) {
        jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString()));
    }
    ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
    JobType type = JobType.fromJobName(report.field("jobName").asString());
    long buildNumber = report.field("buildNumber").asLong();
    if (type == JobType.component)
        return DeploymentJobs.JobReport.ofComponent(id,
                                                    report.field("projectId").asLong(),
                                                    buildNumber,
                                                    jobError,
                                                    toSourceRevision(report.field("sourceRevision")));
    else
        return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
}

/** Parses a source revision from JSON; "repository", "branch" and "commit" are all mandatory. */
private static SourceRevision toSourceRevision(Inspector object) {
    if (!object.field("repository").valid() ||
        !object.field("branch").valid() ||
        !object.field("commit").valid()) {
        throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
    }
    return new SourceRevision(object.field("repository").asString(),
                              object.field("branch").asString(),
                              object.field("commit").asString());
}

/** Returns the named tenant, or throws NotExistsException. */
private Tenant getTenantOrThrow(String tenantName) {
    return controller.tenants().tenant(tenantName)
                     .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
}

/**
 * Serializes a tenant, optionally including its default-instance applications
 * (recursively when the "recursive" query property requests it) and Athenz contact info.
 */
private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) {
    object.setString("tenant", tenant.name().value());
    object.setString("type", tentantType(tenant));
    if (tenant instanceof AthenzTenant) {
        AthenzTenant athenzTenant = (AthenzTenant) tenant;
        object.setString("athensDomain", athenzTenant.domain().getName());
        object.setString("property", athenzTenant.property().id());
        athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
    }
    Cursor applicationArray = object.setArray("applications");
    if (listApplications) {
        for (Application application : controller.applications().asList(tenant.name())) {
            if (application.id().instance().isDefault()) {  // TODO: Handle all instances
                if (recurseOverApplications(request))
                    toSlime(applicationArray.addObject(), application, request);
                else
                    toSlime(application, applicationArray.addObject(), request);
            }
        }
    }
    if (tenant instanceof AthenzTenant) {
        AthenzTenant athenzTenant = (AthenzTenant) tenant;
        athenzTenant.contact().ifPresent(c -> {
            object.setString("propertyUrl", c.propertyUrl().toString());
            object.setString("contactsUrl", c.url().toString());
            object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
            Cursor contactsArray = object.setArray("contacts");
            c.persons().forEach(persons -> {
                Cursor personArray = contactsArray.addArray();
                persons.forEach(personArray::addString);
            });
        });
    }
}

/** Serializes a tenant entry for the tenant list: name, metadata and a self-URL. */
private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
    object.setString("tenant", tenant.name().value());
    Cursor metaData =
object.setObject("metaData"); metaData.setString("type", tentantType(tenant)); if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setLong("id", jobRun.id()); object.setString("version", jobRun.platform().toFullString()); if (!jobRun.application().isUnknown()) toSlime(jobRun.application(), object.setObject("revision")); object.setString("reason", jobRun.reason()); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest request) { AthenzIdentity identity = getUserPrincipal(request).getIdentity(); boolean isDomainAdmin = zmsClient.isDomainAdmin(identity, tenantDomain); if ( ! 
// Tail of throwIfNotAthenzDomainAdmin: reject non-admins of the tenant's Athenz domain.
isDomainAdmin) {
        throw new ForbiddenException(
                String.format("The user '%s' is not admin in Athenz domain '%s'", identity.getFullName(), tenantDomain.getName()));
    }
}

/** Returns the requesting user's id, or empty if the principal is not an AthenzUser. */
private static Optional<UserId> getUserId(HttpRequest request) {
    return Optional.of(getUserPrincipal(request))
                   .map(AthenzPrincipal::getIdentity)
                   .filter(AthenzUser.class::isInstance)
                   .map(AthenzUser.class::cast)
                   .map(AthenzUser::getName)
                   .map(UserId::new);
}

/**
 * Returns the authenticated Athenz principal of the request.
 * Throws InternalServerErrorException when there is no principal or it has an unexpected type,
 * since authentication is expected to have happened upstream.
 */
private static AthenzPrincipal getUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null) throw new InternalServerErrorException("Expected a user principal");
    if (!(principal instanceof AthenzPrincipal))
        throw new InternalServerErrorException(
                String.format("Expected principal of type %s, got %s",
                              AthenzPrincipal.class.getSimpleName(), principal.getClass().getName()));
    return (AthenzPrincipal) principal;
}

/** Returns the named field of the given JSON object, throwing IllegalArgumentException when missing. */
private Inspector mandatory(String key, Inspector object) {
    if ( ! object.field(key).valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return object.field(key);
}

/** Returns the named field as a string, or empty when the field is missing. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}

/** Joins the string forms of the given elements with "/". */
private static String path(Object... elements) {
    return Joiner.on("/").join(elements);
}

/** Serializes an application reference: name, instance and a self-URL. */
private void toSlime(Application application, Cursor object, HttpRequest request) {
    object.setString("application", application.id().application().value());
    object.setString("instance", application.id().instance().value());
    object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() +
                                     "/application/" + application.id().application().value(),
                                     request.getUri()).toString());
}

/**
 * Serializes the result of a deploy/activate operation: revision id, package size,
 * prepare log messages, and the config change actions (restart and refeed).
 */
private Slime toSlime(ActivateResult result) {
    Slime slime = new Slime();
    Cursor object = slime.setObject();
    object.setString("revisionId", result.revisionId().id());
    object.setLong("applicationZipSize", result.applicationZipSizeBytes());
    Cursor logArray = object.setArray("prepareMessages");
    if (result.prepareResponse().log != null) {
        for (Log logMessage : result.prepareResponse().log) {
            Cursor logObject = logArray.addObject();
            logObject.setLong("time", logMessage.time);
            logObject.setString("level", logMessage.level);
            logObject.setString("message", logMessage.message);
        }
    }
    Cursor changeObject = object.setObject("configChangeActions");
    Cursor restartActionsArray = changeObject.setArray("restart");
    for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) {
        Cursor restartActionObject = restartActionsArray.addObject();
        restartActionObject.setString("clusterName", restartAction.clusterName);
        restartActionObject.setString("clusterType", restartAction.clusterType);
        restartActionObject.setString("serviceType", restartAction.serviceType);
        serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services"));
        stringsToSlime(restartAction.messages, restartActionObject.setArray("messages"));
    }
    Cursor refeedActionsArray = changeObject.setArray("refeed");
    for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) {
        Cursor refeedActionObject = refeedActionsArray.addObject();
        refeedActionObject.setString("name", refeedAction.name);
refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } public static void toSlime(DeploymentCost deploymentCost, Cursor object) { object.setLong("tco", (long)deploymentCost.getTco()); object.setLong("waste", (long)deploymentCost.getWaste()); object.setDouble("utilization", deploymentCost.getUtilization()); Cursor clustersObject = object.setObject("cluster"); for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet()) toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey())); } private static void toSlime(ClusterCost clusterCost, Cursor object) { object.setLong("count", clusterCost.getClusterInfo().getHostnames().size()); object.setString("resource", getResourceName(clusterCost.getResultUtilization())); object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization()); object.setLong("tco", (int)clusterCost.getTco()); object.setLong("waste", (int)clusterCost.getWaste()); object.setString("flavor", clusterCost.getClusterInfo().getFlavor()); object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost()); object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU()); object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem()); object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk()); object.setString("type", clusterCost.getClusterInfo().getClusterType().name()); Cursor utilObject = object.setObject("util"); utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu()); utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory()); utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk()); utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy()); Cursor usageObject = object.setObject("usage"); 
// Tail of toSlime(ClusterCost, Cursor): raw system utilization and the cluster's hostnames.
usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
    usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
    usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
    usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
    Cursor hostnamesArray = object.setArray("hostnames");
    for (String hostname : clusterCost.getClusterInfo().getHostnames())
        hostnamesArray.addString(hostname);
}

/**
 * Returns which resource dimension has the maximum utilization: "cpu" (the default),
 * "mem", "disk" or "diskbusy".
 */
private static String getResourceName(ClusterUtilization utilization) {
    String name = "cpu";
    double max = utilization.getMaxUtilization();
    if (utilization.getMemory() == max) {
        name = "mem";
    } else if (utilization.getDisk() == max) {
        name = "disk";
    } else if (utilization.getDiskBusy() == max) {
        name = "diskbusy";
    }
    return name;
}

// The "recursive" query property forms a hierarchy: deployment ⊂ application ⊂ tenant.
private static boolean recurseOverTenants(HttpRequest request) {
    return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
}

private static boolean recurseOverApplications(HttpRequest request) {
    return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
}

private static boolean recurseOverDeployments(HttpRequest request) {
    return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
}

/**
 * Returns the serialized type tag for a tenant: "ATHENS" or "USER".
 * NOTE(review): the name misspells "tenantType"; renaming requires updating all callers,
 * so it is kept as-is here.
 */
private static String tentantType(Tenant tenant) {
    if (tenant instanceof AthenzTenant) {
        return "ATHENS";
    } else if (tenant instanceof UserTenant) {
        return "USER";
    }
    throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
}

/** Returns the Okta access token of the request, throwing IllegalArgumentException with the given message when absent. */
private static OktaAccessToken requireOktaAccessToken(HttpRequest request, String message) {
    return getOktaAccessToken(request)
            .orElseThrow(() -> new IllegalArgumentException(message + ": No Okta Access Token provided"));
}

/** Returns the Okta access token from the jDisc request context, if present. */
private static Optional<OktaAccessToken> getOktaAccessToken(HttpRequest request) {
    return Optional.ofNullable(request.getJDiscRequest().context().get("okta.access-token"))
                   .map(attribute -> new OktaAccessToken((String) attribute));
}

/** Builds an ApplicationId from the {tenant}/{application}/{instance} path segments. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}

/** Resolves the job type from the {jobtype} path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}

/** Builds a RunId from the application, job type and {number} path segments. */
private static RunId runIdFromPath(Path path) {
    long number = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}

/**
 * Accepts a submission (application and test packages plus submit options) for internal
 * deployment. Verifies the package's identity configuration before handing off to the
 * job controller; projectId is clamped to at least 1.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = new MultipartParser().parse(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    SourceRevision sourceRevision = toSourceRevision(submitOptions);
    String authorEmail = submitOptions.field("authorEmail").asString();
    long projectId = Math.max(1, submitOptions.field("projectId").asLong());
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     applicationPackage,
                                                                     Optional.of(getUserPrincipal(request).getIdentity()));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
}

}
class ApplicationApiHandler extends LoggingRequestHandler {

    private final Controller controller;
    // NOTE(review): zmsClient is initialized here but not used in the visible part of this class — confirm it is used further down.
    private final ZmsClientFacade zmsClient;

    @Inject
    public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AthenzClientFactory athenzClientFactory) {
        super(parentCtx);
        this.controller = controller;
        this.zmsClient = new ZmsClientFacade(athenzClientFactory.createZmsClient(), athenzClientFactory.getControllerIdentity());
    }

    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20); // deployments can take a long time
    }

    /** Dispatches on HTTP method and converts known exception types to the matching error responses. */
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            switch (request.getMethod()) {
                case GET: return handleGET(request);
                case PUT: return handlePUT(request);
                case POST: return handlePOST(request);
                case PATCH: return handlePATCH(request);
                case DELETE: return handleDELETE(request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotAuthorizedException e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            return ErrorResponse.from(e);
        }
        catch (RuntimeException e) {
            // Unexpected: log with stack trace, but return only the message to the client.
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    /** Routes GET requests by path pattern. */
    private HttpResponse handleGET(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
        if (path.matches("/application/v4/athensDomain")) return athenzDomains(request);
        if (path.matches("/application/v4/property")) return properties();
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
        // The /pin sub-path returns the same payload as /deploying.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        if
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PUT requests by path pattern. */
    private HttpResponse handlePUT(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        // PUT puts the rotation back in service (inService = false means "clear the out-of-service override").
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes POST requests by path pattern. */
    private HttpResponse handlePOST(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), readToString(request.getData()), false);
        // Same as /platform but pins the platform version.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), readToString(request.getData()), true);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request);
        if
(path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // /deploy is an alias for POSTing directly to the instance path above.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes PATCH requests by path pattern. */
    private HttpResponse handlePATCH(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return setMajorVersion(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Routes DELETE requests by path pattern. */
    private HttpResponse handleDELETE(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // DELETE of the override takes the rotation out of service (inService = true means "set the override").
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** Advertises the supported HTTP methods. */
    private HttpResponse handleOPTIONS() {
        EmptyJsonResponse response = new EmptyJsonResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /** Lists all tenants recursively, including their applications. */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
Cursor tenantArray = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            toSlime(tenantArray.addObject(), tenant, request, true);
        return new SlimeJsonResponse(slime);
    }

    /** Root resource: recursive tenant listing if requested, otherwise links to the sub-resources. */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request)
                ? recursiveRoot(request)
                : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property");
    }

    /** Returns the authenticated (or overridden) user and the tenants it belongs to. */
    private HttpResponse authenticatedUser(HttpRequest request) {
        String userIdString = request.getProperty("userOverride");
        if (userIdString == null)
            userIdString = getUserId(request)
                    .map(UserId::id)
                    .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
        UserId userId = new UserId(userIdString);
        List<Tenant> tenants = controller.tenants().asList(userId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userId.id());
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        // True if the user's personal (user) tenant exists among the tenants.
        response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant instanceof UserTenant && ((UserTenant) tenant).is(userId.id())));
        return new SlimeJsonResponse(slime);
    }

    /** Lists all tenants, non-recursively. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application */
    private HttpResponse tenantPipelines() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor pipelinesArray = response.setArray("tenantPipelines");
        for (Application application : controller.applications().asList()) {
            // Applications without a project id have no pipeline to report.
            if ( ! application.deploymentJobs().projectId().isPresent()) continue;
            Cursor pipelineObject = pipelinesArray.addObject();
            pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong()));
            pipelineObject.setString("tenant", application.id().tenant().value());
            pipelineObject.setString("application", application.id().application().value());
            pipelineObject.setString("instance", application.id().instance().value());
        }
        // Always present, always empty. NOTE(review): presumably kept for response-schema compatibility — confirm.
        response.setArray("brokenTenantPipelines");
        return new SlimeJsonResponse(slime);
    }

    /** Lists Athenz domains matching the optional 'prefix' request property. */
    private HttpResponse athenzDomains(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("data");
        for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) {
            array.addString(athenzDomain.getName());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Lists all known property id → property name pairs. */
    private HttpResponse properties() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("properties");
        for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
            Cursor propertyObject = array.addObject();
            propertyObject.setString("propertyid", entry.getKey().id());
            propertyObject.setString("property", entry.getValue().id());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns a single tenant with its applications, or 404 if it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().tenant(TenantName.from(tenantName))
                .map(tenant -> tenant(tenant, request, true))
                .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Serializes the given tenant, optionally including its application list. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request, listApplications);
        return new SlimeJsonResponse(slime);
    }

    /** Lists all applications of a tenant. */
    private HttpResponse applications(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array =
slime.setArray();
        for (Application application : controller.applications().asList(tenant))
            toSlime(application, array.addObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the full serialization of a single application (default instance). */
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
        return new SlimeJsonResponse(slime);
    }

    /** Sets (or, with majorVersion 0, clears) the pinned major version of an application. */
    private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) {
        Application application = getApplication(tenantName, applicationName);
        Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion");
        if ( ! majorVersionField.valid())
            throw new IllegalArgumentException("Request body must contain a majorVersion field");
        // 0 means "unset": stored as null.
        Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int)majorVersionField.asLong();
        controller.applications().lockIfPresent(application.id(),
                a -> controller.applications().store(a.withMajorVersion(majorVersion)));
        return new MessageResponse("Set major version to " + ( majorVersion == null ? "empty" : majorVersion));
    }

    /** Looks up the default instance of the named application, throwing NotExistsException if absent. */
    private Application getApplication(String tenantName, String applicationName) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
        return controller.applications().get(applicationId)
                .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    /** Force-triggers the given job for the application, reporting which jobs were actually triggered. */
    private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
        String triggered = controller.applications().deploymentTrigger()
                .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName())
                .stream().map(JobType::jobName).collect(joining(", "));
        return new MessageResponse(triggered.isEmpty() ? "Job " + type.jobName() + " for " + id + " not triggered"
                                                       : "Triggered " + triggered + " for " + id);
    }

    /** Pauses the given job for the maximum allowed pause duration. */
    private HttpResponse pause(ApplicationId id, JobType type) {
        Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
        controller.applications().deploymentTrigger().pauseJob(id, type, until);
        return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
    }

    /** Serializes an application, its jobs, change blockers, rotations, deployments and metrics. */
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("application", application.id().application().value());
        object.setString("instance", application.id().instance().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + application.id().tenant().value() +
                                                 "/application/" + application.id().application().value() +
                                                 "/instance/" + application.id().instance().value() + "/job/",
                                                 request.getUri()).toString());
        // Source of the last successful component build, if any.
        application.deploymentJobs().statusOf(JobType.component)
                .flatMap(JobStatus::lastSuccess)
                .map(run -> run.application().source())
                .ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
        application.deploymentJobs().projectId()
                .ifPresent(id -> object.setLong("projectId", id));
        if ( ! application.change().isEmpty()) {
            toSlime(object.setObject("deploying"), application.change());
        }
        if ( !
application.outstandingChange().isEmpty()) {
            toSlime(object.setObject("outstandingChange"), application.outstandingChange());
        }

        // Jobs, in deployment-spec order.
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                .steps(application.deploymentSpec())
                .sortedJobs(application.deploymentJobs().jobStatus().values());
        object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
        Cursor deploymentsArray = object.setArray("deploymentJobs");
        for (JobStatus job : jobStatus) {
            Cursor jobObject = deploymentsArray.addObject();
            jobObject.setString("type", job.type().jobName());
            jobObject.setBool("success", job.isSuccess());
            job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
            job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
            job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
            job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
        }

        // Change blockers from the deployment spec (block windows for version/revision changes).
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        });

        object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));

        // Global rotation URLs: legacy global DNS name plus any routing-policy rotations.
        Cursor globalRotationsArray = object.setArray("globalRotations");
        application.globalDnsName(controller.system()).ifPresent(rotation -> {
            globalRotationsArray.addString(rotation.url().toString());
            globalRotationsArray.addString(rotation.secureUrl().toString());
            globalRotationsArray.addString(rotation.oathUrl().toString());
            object.setString("rotationId", application.rotation().get().asString());
        });
        Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
        for (RoutingPolicy policy : routingPolicies) {
            for (RotationName rotation : policy.rotations()) {
                GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation);
                globalRotationsArray.addString(dnsName.oathUrl().toString());
            }
        }

        // Deployments ("instances"), in deployment-spec order.
        List<Deployment> deployments = controller.applications().deploymentTrigger()
                .steps(application.deploymentSpec())
                .sortedDeployments(application.deployments().values());
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", application.id().instance().value());
            if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
                toSlime(application.rotationStatus(deployment), deploymentObject);
            }
            if (recurseOverDeployments(request)) // List full deployment information when recursive.
                toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
            else
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value() +
                                                           "/instance/" + application.id().instance().value(),
                                                           request.getUri()).toString());
        }

        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());

        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));

        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }

    /** Returns the serialization of one deployment, or 404 if the application or deployment is unknown. */
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application = controller.applications().get(id)
                .orElseThrow(() -> new NotExistsException(id + " not found"));
        DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region));
        Deployment deployment = application.deployments().get(deploymentId.zoneId());
        if (deployment == null)
            throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());
        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    /** Serializes a change: platform version and/or (known) application revision. */
    private void toSlime(Cursor object, Change change) {
        change.platform().ifPresent(version -> object.setString("version", version.toString()));
        change.application()
              .filter(version -> !version.isUnknown())
              .ifPresent(version -> toSlime(version, object.setObject("revision")));
    }

    /** Serializes one deployment: endpoints, links, versions, source, activity (continues below). */
    private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        Cursor serviceUrlArray = response.setArray("serviceUrls");
        controller.applications().getDeploymentEndpoints(deploymentId)
                .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));

        response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());

        controller.zoneRegistry().getLogServerUri(deploymentId)
                .ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString()));

        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.applicationVersion().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));

        controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
                .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.applicationVersion().source(), response);

        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value ->
activity.setDouble("lastWritesPerSecond", value));

        // Cost and metrics of this deployment.
        DeploymentCost appCost = deployment.calculateCost();
        Cursor costObject = response.setObject("cost");
        toSlime(appCost, costObject);

        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }

    /** Serializes an application version: hash and source revision, omitted entirely when unknown. */
    private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
        if (!applicationVersion.isUnknown()) {
            object.setString("hash", applicationVersion.id());
            sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
        }
    }

    /** Serializes a source revision (repository, branch, commit); no-op when absent. */
    private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
        if ( ! revision.isPresent()) return;
        object.setString("gitRepository", revision.get().repository());
        object.setString("gitBranch", revision.get().branch());
        object.setString("gitCommit", revision.get().commit());
    }

    /** Serializes the BCP rotation status of a deployment. */
    private void toSlime(RotationStatus status, Cursor object) {
        Cursor bcpStatus = object.setObject("bcpStatus");
        bcpStatus.setString("rotationStatus", status.name().toUpperCase());
    }

    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }

    /**
     * Sets or clears the out-of-service override for a deployment's global rotation.
     * inService = true puts the deployment back in service; false takes it out.
     * The request body must contain a 'reason' field.
     */
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
        Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
        ZoneId zone = ZoneId.from(environment, region);
        Deployment deployment = application.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(application + " has no deployment in " + zone);
        }

        Inspector requestData = toSlime(request.getData()).get();
        String reason = mandatory("reason", requestData).asString();
        String agent = getUserPrincipal(request).getIdentity().getFullName();
        long timestamp = controller.clock().instant().getEpochSecond();
        EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
        EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp);
        controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()), endpointStatus);

        return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                                 application.id().toShortString(),
                                                 deployment.zone().environment().value(),
                                                 deployment.zone().region().value(),
                                                 inService ?
"in" : "out of"));
    }

    /** Returns the current global-rotation override status for each routing endpoint of a deployment. */
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     ZoneId.from(environment, region));
        Slime slime = new Slime();
        Cursor array = slime.setObject().setArray("globalrotationoverride");
        Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId);
        for (RoutingEndpoint endpoint : status.keySet()) {
            EndpointStatus currentStatus = status.get(endpoint);
            // Each endpoint contributes two array entries: its upstream name and a status object.
            array.addString(endpoint.upstreamName());
            Cursor statusObject = array.addObject();
            statusObject.setString("status", currentStatus.getStatus().name());
            statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
            statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
            statusObject.setLong("timestamp", currentStatus.getEpoch());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Returns the rotation status of one deployment; 404 when the application has no rotation or no such deployment. */
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application = controller.applications().require(applicationId);
        ZoneId zone = ZoneId.from(environment, region);
        if (!application.rotation().isPresent()) {
            throw new NotExistsException("global rotation does not exist for " + application);
        }
        Deployment deployment = application.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(application + " has no deployment in " + zone);
        }

        Slime slime = new Slime();
        Cursor response = slime.setObject();
        toSlime(application.rotationStatus(deployment), response);
        return new SlimeJsonResponse(slime);
    }

    /** Returns the pending change of an application: platform version, application revision, pinning. */
    private HttpResponse deploying(String tenant, String application, HttpRequest request) {
        Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        if (!app.change().isEmpty()) {
            app.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
            app.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
            root.setBool("pinned", app.change().isPinned());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Reports whether a deployment is suspended (e.g. by the orchestrator). */
    private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     ZoneId.from(environment, region));
        boolean suspended = controller.applications().isSuspended(deploymentId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setBool("suspended", suspended);
        return new SlimeJsonResponse(slime);
    }

    /** Lists the services of a deployment, as seen by the config servers of the zone. */
    private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
        ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
                                                             new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                             controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)),
                                                             request.getUri());
        response.setResponse(applicationView);
        return response;
    }

    /** Proxies a request for a single service's API (restPath) in a deployment. */
    private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
        Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
        ServiceApiResponse response = new
ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<UserId> user = getUserId(request); if ( ! user.isPresent() ) throw new ForbiddenException("Not authenticated or not an user."); String username = UserTenant.normalizeUser(user.get().id()); try { controller.tenants().create(UserTenant.create(username)); return new MessageResponse("Created user '" + username + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<AthenzTenant> tenant = controller.tenants().athenzTenant(TenantName.from(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); Inspector requestData = toSlime(request.getData()).get(); OktaAccessToken token = requireOktaAccessToken(request, "Could not update " + tenantName); controller.tenants().lockOrThrow(tenant.get().name(), lockedTenant -> { lockedTenant = lockedTenant.with(new Property(mandatory("property", requestData).asString())); lockedTenant = controller.tenants().withDomain( lockedTenant, new AthenzDomain(mandatory("athensDomain", requestData).asString()), token ); Optional<PropertyId> propertyId = optional("propertyId", requestData).map(PropertyId::new); if (propertyId.isPresent()) { lockedTenant = lockedTenant.with(propertyId.get()); } controller.tenants().store(lockedTenant); }); return tenant(controller.tenants().requireAthenzTenant(tenant.get().name()), request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { Inspector requestData = toSlime(request.getData()).get(); AthenzTenant tenant = AthenzTenant.create(TenantName.from(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotAthenzDomainAdmin(tenant.domain(), request); controller.tenants().create(tenant, requireOktaAccessToken(request, "Could not create " + tenantName)); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), getOktaAccessToken(request)); } catch (ZmsClientException e) { if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, 
slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */ private HttpResponse deployPlatform(String tenantName, String applicationName, String versionString, boolean pin) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); StringBuilder response = new StringBuilder(); controller.applications().lockOrThrow(id, application -> { Version version = Version.fromString(versionString); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPin(); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered " + change + " for " + id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. 
*/ private HttpResponse deployApplication(String tenantName, String applicationName) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); StringBuilder response = new StringBuilder(); controller.applications().lockOrThrow(id, application -> { Change change = Change.of(application.get().deploymentJobs().statusOf(JobType.component).get().lastSuccess().get().application()); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered " + change + " for " + id); }); return new MessageResponse(response.toString()); } /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */ private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); StringBuilder response = new StringBuilder(); controller.applications().lockOrThrow(id, application -> { Change change = application.get().change(); if (change.isEmpty()) { response.append("No deployment in progress for " + application + " at this time"); return; } ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase()); controller.applications().deploymentTrigger().cancelChange(id, cancel); response.append("Changed deployment from '" + change + "' to '" + controller.applications().require(id).change() + "' for " + application); }); return new MessageResponse(response.toString()); } /** Schedule restart of deployment, or specific host in a deployment */ private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new); controller.applications().restart(deploymentId, hostname); return new 
StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = ZoneId.from(environment, region); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); /* * Special handling of the zone application (the only system application with an application package) * Setting any other deployOptions here is not supported for now (e.g. specifying version), but * this might be handy later to handle emergency downgrades. 
*/ boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId); if (isZoneApplication) { String versionStr = deployOptions.field("vespaVersion").asString(); boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null"); if (versionPresent) { throw new RuntimeException("Version not supported for system applications"); } if (controller.versionStatus().isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } ActivateResult result = controller.applications() .deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber()); return new SlimeJsonResponse(toSlime(result)); } /* * Normal applications from here */ Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip")) .map(ApplicationPackage::new); Inspector sourceRevision = deployOptions.field("sourceRevision"); Inspector buildNumber = deployOptions.field("buildNumber"); if (sourceRevision.valid() != buildNumber.valid()) throw new IllegalArgumentException("Source revision and build number must both be provided, or not"); Optional<ApplicationVersion> applicationVersion = Optional.empty(); if (sourceRevision.valid()) { if (applicationPackage.isPresent()) throw new IllegalArgumentException("Application version and application package can't both be provided."); applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision), buildNumber.asLong())); applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get())); } boolean deployDirectly = deployOptions.field("deployDirectly").asBool(); 
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new); /* * Deploy direct is when we want to redeploy the current application - retrieve version * info from the application package before deploying */ if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) { Optional<Deployment> deployment = controller.applications().get(applicationId) .map(Application::deployments) .flatMap(deployments -> Optional.ofNullable(deployments.get(zone))); if(!deployment.isPresent()) throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist"); ApplicationVersion version = deployment.get().applicationVersion(); if(version.isUnknown()) throw new IllegalArgumentException("Can't redeploy application, application version is unknown"); applicationVersion = Optional.of(version); vespaVersion = Optional.of(deployment.get().version()); applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get())); } DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly, vespaVersion, deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deploy(applicationId, zone, applicationPackage, applicationVersion, deployOptionsJsonClass, Optional.of(getUserPrincipal(request).getIdentity())); return new SlimeJsonResponse(toSlime(result)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(tenantName); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); if (tenant.get() instanceof AthenzTenant) { controller.tenants().deleteTenant((AthenzTenant) tenant.get(), requireOktaAccessToken(request, "Could not delete " + tenantName)); } else if (tenant.get() instanceof UserTenant) { controller.tenants().deleteTenant((UserTenant) tenant.get()); } else { throw new IllegalArgumentException("Unknown tenant type:" + tenant.get().getClass().getSimpleName() + ", for " + tenant.get()); } return tenant(tenant.get(), request, false); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); controller.applications().deleteApplication(id, getOktaAccessToken(request)); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); controller.applications().deactivate(application.id(), ZoneId.from(environment, region)); return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) { try { DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get()); if ( report.jobType() == JobType.component && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally()) throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " + "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " + "to the old pipeline, please file a ticket at yo/vespa-support and request this."); controller.applications().deploymentTrigger().notifyOfCompletion(report); return new MessageResponse("ok"); } catch (IllegalStateException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } } private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) { Optional<DeploymentJobs.JobError> jobError = Optional.empty(); if (report.field("jobError").valid()) { jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString())); } ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString()); JobType type = JobType.fromJobName(report.field("jobName").asString()); long buildNumber = report.field("buildNumber").asLong(); if (type == JobType.component) return DeploymentJobs.JobReport.ofComponent(id, report.field("projectId").asLong(), buildNumber, jobError, toSourceRevision(report.field("sourceRevision"))); else return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || 
!object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().tenant(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) { object.setString("tenant", tenant.name().value()); object.setString("type", tentantType(tenant)); if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); } Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(tenant.name())) { if (application.id().instance().isDefault()) { if (recurseOverApplications(request)) toSlime(applicationArray.addObject(), application, request); else toSlime(application, applicationArray.addObject(), request); } } } if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); } } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = 
object.setObject("metaData"); metaData.setString("type", tentantType(tenant)); if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setLong("id", jobRun.id()); object.setString("version", jobRun.platform().toFullString()); if (!jobRun.application().isUnknown()) toSlime(jobRun.application(), object.setObject("revision")); object.setString("reason", jobRun.reason()); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest request) { AthenzIdentity identity = getUserPrincipal(request).getIdentity(); boolean isDomainAdmin = zmsClient.isDomainAdmin(identity, tenantDomain); if ( ! 
// Tail of throwIfNotAthenzDomainAdmin (opened on the previous line): reject identities that
// are not admins of the tenant's Athenz domain.
isDomainAdmin) { throw new ForbiddenException( String.format("The user '%s' is not admin in Athenz domain '%s'", identity.getFullName(), tenantDomain.getName())); } }

/** Returns the authenticated user's id, or empty when the request principal is not an AthenzUser. */
private static Optional<UserId> getUserId(HttpRequest request) {
    return Optional.of(getUserPrincipal(request))
                   .map(AthenzPrincipal::getIdentity)
                   .filter(AthenzUser.class::isInstance)
                   .map(AthenzUser.class::cast)
                   .map(AthenzUser::getName)
                   .map(UserId::new);
}

/**
 * Returns the request's principal as an AthenzPrincipal.
 *
 * @throws InternalServerErrorException if no principal is attached, or it has another type
 */
private static AthenzPrincipal getUserPrincipal(HttpRequest request) {
    Principal principal = request.getJDiscRequest().getUserPrincipal();
    if (principal == null) throw new InternalServerErrorException("Expected a user principal");
    if (!(principal instanceof AthenzPrincipal))
        throw new InternalServerErrorException(
                String.format("Expected principal of type %s, got %s",
                              AthenzPrincipal.class.getSimpleName(), principal.getClass().getName()));
    return (AthenzPrincipal) principal;
}

/** Returns the named field of the given object, throwing IllegalArgumentException if it is missing. */
private Inspector mandatory(String key, Inspector object) {
    if ( ! object.field(key).valid())
        throw new IllegalArgumentException("'" + key + "' is missing");
    return object.field(key);
}

/** Returns the named string field, or empty if absent. */
private Optional<String> optional(String key, Inspector object) {
    return SlimeUtils.optionalString(object.field(key));
}

// Signature of path(...) continues on the next line.
private static String path(Object...
elements) { return Joiner.on("/").join(elements); } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); 
refeedActionObject.setBool("allowed", refeedAction.allowed); refeedActionObject.setString("documentType", refeedAction.documentType); refeedActionObject.setString("clusterName", refeedAction.clusterName); serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services")); stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages")); } return slime; } private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) { for (ServiceInfo serviceInfo : serviceInfoList) { Cursor serviceInfoObject = array.addObject(); serviceInfoObject.setString("serviceName", serviceInfo.serviceName); serviceInfoObject.setString("serviceType", serviceInfo.serviceType); serviceInfoObject.setString("configId", serviceInfo.configId); serviceInfoObject.setString("hostName", serviceInfo.hostName); } } private void stringsToSlime(List<String> strings, Cursor array) { for (String string : strings) array.addString(string); } private String readToString(InputStream stream) { Scanner scanner = new Scanner(stream).useDelimiter("\\A"); if ( ! 
scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } public static void toSlime(DeploymentCost deploymentCost, Cursor object) { object.setLong("tco", (long)deploymentCost.getTco()); object.setLong("waste", (long)deploymentCost.getWaste()); object.setDouble("utilization", deploymentCost.getUtilization()); Cursor clustersObject = object.setObject("cluster"); for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet()) toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey())); } private static void toSlime(ClusterCost clusterCost, Cursor object) { object.setLong("count", clusterCost.getClusterInfo().getHostnames().size()); object.setString("resource", getResourceName(clusterCost.getResultUtilization())); object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization()); object.setLong("tco", (int)clusterCost.getTco()); object.setLong("waste", (int)clusterCost.getWaste()); object.setString("flavor", clusterCost.getClusterInfo().getFlavor()); object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost()); object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU()); object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem()); object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk()); object.setString("type", clusterCost.getClusterInfo().getClusterType().name()); Cursor utilObject = object.setObject("util"); utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu()); utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory()); utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk()); utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy()); Cursor usageObject = object.setObject("usage"); 
usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu()); usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory()); usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk()); usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy()); Cursor hostnamesArray = object.setArray("hostnames"); for (String hostname : clusterCost.getClusterInfo().getHostnames()) hostnamesArray.addString(hostname); } private static String getResourceName(ClusterUtilization utilization) { String name = "cpu"; double max = utilization.getMaxUtilization(); if (utilization.getMemory() == max) { name = "mem"; } else if (utilization.getDisk() == max) { name = "disk"; } else if (utilization.getDiskBusy() == max) { name = "diskbusy"; } return name; } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static String tentantType(Tenant tenant) { if (tenant instanceof AthenzTenant) { return "ATHENS"; } else if (tenant instanceof UserTenant) { return "USER"; } throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName()); } private static OktaAccessToken requireOktaAccessToken(HttpRequest request, String message) { return getOktaAccessToken(request) .orElseThrow(() -> new IllegalArgumentException(message + ": No Okta Access Token provided")); } private static Optional<OktaAccessToken> getOktaAccessToken(HttpRequest request) { return Optional.ofNullable(request.getJDiscRequest().context().get("okta.access-token")) .map(attribute -> new 
// Tail of getOktaAccessToken (opened on the previous line): wraps the request context
// attribute, when present, as an OktaAccessToken.
OktaAccessToken((String) attribute)); }

/** Reads the tenant, application and instance path segments into an ApplicationId. */
private static ApplicationId appIdFromPath(Path path) {
    return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
}

/** Resolves the job type from the "jobtype" path segment. */
private static JobType jobTypeFromPath(Path path) {
    return JobType.fromJobName(path.get("jobtype"));
}

/** Builds a RunId from the application, job type and "number" path segments. */
private static RunId runIdFromPath(Path path) {
    long number = Long.parseLong(path.get("number"));
    return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
}

/**
 * Handles an application submission: parses the multipart request, verifies the package's
 * identity configuration, and forwards package, test package and metadata to the job controller.
 */
private HttpResponse submit(String tenant, String application, HttpRequest request) {
    Map<String, byte[]> dataParts = new MultipartParser().parse(request);
    Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
    SourceRevision sourceRevision = toSourceRevision(submitOptions);
    String authorEmail = submitOptions.field("authorEmail").asString();
    // Project ids below 1 are normalized up to 1.
    long projectId = Math.max(1, submitOptions.field("projectId").asLong());
    ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
    controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                     applicationPackage,
                                                                     Optional.of(getUserPrincipal(request).getIdentity()));
    return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                        tenant,
                                                        application,
                                                        sourceRevision,
                                                        authorEmail,
                                                        projectId,
                                                        applicationPackage,
                                                        dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
} }
I honestly found the old version easier to read, but I won't object :)
/** Refreshes the stored contact information for every known tenant, one tenant at a time. */
protected void maintain() {
    for (Tenant tenant : controller().tenants().asList()) {
        try {
            // Only Athenz tenants carry a property id; all others get an empty lookup key.
            Optional<PropertyId> propertyId = tenant instanceof AthenzTenant
                    ? ((AthenzTenant) tenant).propertyId()
                    : Optional.empty();
            Contact freshContact = contactRetriever.getContact(propertyId);
            // Store under lock; silently skipped if the tenant vanished in the meantime.
            controller().tenants().lockIfPresent(tenant.name(),
                                                 lockedTenant -> controller().tenants().store(lockedTenant.with(freshContact)));
        } catch (Exception e) {
            // Best effort per tenant: log the failure and continue with the next one.
            log.log(LogLevel.WARNING, "Failed to update contact information for " + tenant + ": " + Exceptions.toMessageString(e) + ". Retrying in " + maintenanceInterval());
        }
    }
}
Optional<PropertyId> tenantPropertyId = Optional.empty();
/** Refreshes the stored contact information for every known tenant. */
protected void maintain() {
    for (Tenant tenant : controller().tenants().asList()) {
        try {
            Optional<PropertyId> tenantPropertyId = Optional.empty();
            // Only Athenz tenants have a property id to look contacts up by.
            if (tenant instanceof AthenzTenant) {
                tenantPropertyId = ((AthenzTenant) tenant).propertyId();
            }
            Contact contact = contactRetriever.getContact(tenantPropertyId);
            // Store under lock; no-op if the tenant no longer exists.
            controller().tenants().lockIfPresent(tenant.name(), lockedTenant -> controller().tenants().store(lockedTenant.with(contact)));
        } catch (Exception e) {
            // Failures are logged and implicitly retried on the next maintenance run.
            log.log(LogLevel.WARNING, "Failed to update contact information for " + tenant + ": " + Exceptions.toMessageString(e) + ". Retrying in " + maintenanceInterval());
        }
    }
}
/**
 * Periodically fetches and stores contact information for tenants.
 * Runs only in the cd and main systems.
 */
class ContactInformationMaintainer extends Maintainer {

    private static final Logger log = Logger.getLogger(ContactInformationMaintainer.class.getName());

    private final ContactRetriever contactRetriever;

    /**
     * @param contactRetriever the source of tenant contact information; must be non-null
     */
    public ContactInformationMaintainer(Controller controller, Duration interval, JobControl jobControl, ContactRetriever contactRetriever) {
        super(controller, interval, jobControl, null, EnumSet.of(SystemName.cd, SystemName.main));
        // Bug fix: the null message previously said "organization", but the argument checked here is contactRetriever.
        this.contactRetriever = Objects.requireNonNull(contactRetriever, "contactRetriever must be non-null");
    }

    @Override }
class ContactInformationMaintainer extends Maintainer { private static final Logger log = Logger.getLogger(ContactInformationMaintainer.class.getName()); private final ContactRetriever contactRetriever; public ContactInformationMaintainer(Controller controller, Duration interval, JobControl jobControl, ContactRetriever contactRetriever) { super(controller, interval, jobControl, null, EnumSet.of(SystemName.cd, SystemName.main)); this.contactRetriever = Objects.requireNonNull(contactRetriever, "organization must be non-null"); } @Override }
Probably something to include for this path as well?
private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), 
path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request); return ErrorResponse.notFoundError("Nothing at " + path); }
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"));
private HttpResponse handlePOST(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/promote")) return promoteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/platform")) return deployPlatform(path.get("tenant"), path.get("application"), false, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deployPlatform(path.get("tenant"), path.get("application"), true, request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/application")) return deployApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/jobreport")) return notifyJobCompletion(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return submit(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return trigger(appIdFromPath(path), jobTypeFromPath(path), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/pause")) return pause(appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), 
path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/deploy")) return deploy(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/restart")) return restart(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/promote")) return promoteApplicationDeployment(path.get("tenant"), path.get("application"), path.get("environment"), path.get("region"), path.get("instance"), request); return ErrorResponse.notFoundError("Nothing at " + path); }
class ApplicationApiHandler extends LoggingRequestHandler { private final Controller controller; private final ZmsClientFacade zmsClient; @Inject public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, AthenzClientFactory athenzClientFactory) { super(parentCtx); this.controller = controller; this.zmsClient = new ZmsClientFacade(athenzClientFactory.createZmsClient(), athenzClientFactory.getControllerIdentity()); } @Override public Duration getTimeout() { return Duration.ofMinutes(20); } @Override public HttpResponse handle(HttpRequest request) { try { switch (request.getMethod()) { case GET: return handleGET(request); case PUT: return handlePUT(request); case POST: return handlePOST(request); case PATCH: return handlePATCH(request); case DELETE: return handleDELETE(request); case OPTIONS: return handleOPTIONS(); default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported"); } } catch (ForbiddenException e) { return ErrorResponse.forbidden(Exceptions.toMessageString(e)); } catch (NotAuthorizedException e) { return ErrorResponse.unauthorized(Exceptions.toMessageString(e)); } catch (NotExistsException e) { return ErrorResponse.notFoundError(Exceptions.toMessageString(e)); } catch (IllegalArgumentException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } catch (ConfigServerException e) { return ErrorResponse.from(e); } catch (RuntimeException e) { log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e); return ErrorResponse.internalServerError(Exceptions.toMessageString(e)); } } private HttpResponse handleGET(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/")) return root(request); if (path.matches("/application/v4/user")) return authenticatedUser(request); if (path.matches("/application/v4/tenant")) return tenants(request); if (path.matches("/application/v4/tenant-pipeline")) return 
tenantPipelines(); if (path.matches("/application/v4/athensDomain")) return athenzDomains(request); if (path.matches("/application/v4/property")) return properties(); if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after")); if 
(path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region")); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePUT(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if 
(path.matches("/application/v4/user")) return createUser(request); if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handlePATCH(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return setMajorVersion(path.get("tenant"), path.get("application"), request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleDELETE(HttpRequest request) { Path path = new Path(request.getUri().getPath()); if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all"); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application")); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return 
JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path)); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request); if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request); return ErrorResponse.notFoundError("Nothing at " + path); } private HttpResponse handleOPTIONS() { EmptyJsonResponse response = new EmptyJsonResponse(); response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS"); return response; } private HttpResponse recursiveRoot(HttpRequest request) { Slime slime = new Slime(); Cursor tenantArray = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) toSlime(tenantArray.addObject(), tenant, request, true); return new SlimeJsonResponse(slime); } private HttpResponse root(HttpRequest request) { return recurseOverTenants(request) ? 
recursiveRoot(request) : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property"); } private HttpResponse authenticatedUser(HttpRequest request) { String userIdString = request.getProperty("userOverride"); if (userIdString == null) userIdString = getUserId(request) .map(UserId::id) .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride")); UserId userId = new UserId(userIdString); List<Tenant> tenants = controller.tenants().asList(userId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setString("user", userId.id()); Cursor tenantsArray = response.setArray("tenants"); for (Tenant tenant : tenants) tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject()); response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant instanceof UserTenant && ((UserTenant) tenant).is(userId.id()))); return new SlimeJsonResponse(slime); } private HttpResponse tenants(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setArray(); for (Tenant tenant : controller.tenants().asList()) tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject()); return new SlimeJsonResponse(slime); } /** Lists the screwdriver project id for each application */ private HttpResponse tenantPipelines() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor pipelinesArray = response.setArray("tenantPipelines"); for (Application application : controller.applications().asList()) { if ( ! 
application.deploymentJobs().projectId().isPresent()) continue; Cursor pipelineObject = pipelinesArray.addObject(); pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong())); pipelineObject.setString("tenant", application.id().tenant().value()); pipelineObject.setString("application", application.id().application().value()); pipelineObject.setString("instance", application.id().instance().value()); } response.setArray("brokenTenantPipelines"); return new SlimeJsonResponse(slime); } private HttpResponse athenzDomains(HttpRequest request) { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("data"); for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) { array.addString(athenzDomain.getName()); } return new SlimeJsonResponse(slime); } private HttpResponse properties() { Slime slime = new Slime(); Cursor response = slime.setObject(); Cursor array = response.setArray("properties"); for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) { Cursor propertyObject = array.addObject(); propertyObject.setString("propertyid", entry.getKey().id()); propertyObject.setString("property", entry.getValue().id()); } return new SlimeJsonResponse(slime); } private HttpResponse tenant(String tenantName, HttpRequest request) { return controller.tenants().tenant(TenantName.from(tenantName)) .map(tenant -> tenant(tenant, request, true)) .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist")); } private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) { Slime slime = new Slime(); toSlime(slime.setObject(), tenant, request, listApplications); return new SlimeJsonResponse(slime); } private HttpResponse applications(String tenantName, HttpRequest request) { TenantName tenant = TenantName.from(tenantName); Slime slime = new Slime(); Cursor array = 
slime.setArray(); for (Application application : controller.applications().asList(tenant)) toSlime(application, array.addObject(), request); return new SlimeJsonResponse(slime); } private HttpResponse application(String tenantName, String applicationName, HttpRequest request) { Slime slime = new Slime(); toSlime(slime.setObject(), getApplication(tenantName, applicationName), request); return new SlimeJsonResponse(slime); } private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) { Application application = getApplication(tenantName, applicationName); Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion"); if ( ! majorVersionField.valid()) throw new IllegalArgumentException("Request body must contain a majorVersion field"); Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int)majorVersionField.asLong(); controller.applications().lockIfPresent(application.id(), a -> controller.applications().store(a.withMajorVersion(majorVersion))); return new MessageResponse("Set major version to " + ( majorVersion == null ? 
"empty" : majorVersion)); } private Application getApplication(String tenantName, String applicationName) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default"); return controller.applications().get(applicationId) .orElseThrow(() -> new NotExistsException(applicationId + " not found")); } private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) { ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = ZoneId.from(environment, region); DeploymentId deployment = new DeploymentId(application, zone); if (queryParameters.containsKey("streaming")) { InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { logStream.transferTo(outputStream); } }; } Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters); Slime slime = new Slime(); Cursor object = slime.setObject(); if (response.isPresent()) { response.get().logs().entrySet().stream().forEach(entry -> object.setString(entry.getKey(), entry.getValue())); } return new SlimeJsonResponse(slime); } private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) { String triggered = controller.applications().deploymentTrigger() .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName()) .stream().map(JobType::jobName).collect(joining(", ")); return new MessageResponse(triggered.isEmpty() ? 
"Job " + type.jobName() + " for " + id + " not triggered" : "Triggered " + triggered + " for " + id); } private HttpResponse pause(ApplicationId id, JobType type) { Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause); controller.applications().deploymentTrigger().pauseJob(id, type, until); return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause); } private void toSlime(Cursor object, Application application, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("deployments", withPath("/application/v4" + "/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value() + "/instance/" + application.id().instance().value() + "/job/", request.getUri()).toString()); application.deploymentJobs().statusOf(JobType.component) .flatMap(JobStatus::lastSuccess) .map(run -> run.application().source()) .ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source"))); application.deploymentJobs().projectId() .ifPresent(id -> object.setLong("projectId", id)); if ( ! application.change().isEmpty()) { toSlime(object.setObject("deploying"), application.change()); } if ( ! 
application.outstandingChange().isEmpty()) { toSlime(object.setObject("outstandingChange"), application.outstandingChange()); } List<JobStatus> jobStatus = controller.applications().deploymentTrigger() .steps(application.deploymentSpec()) .sortedJobs(application.deploymentJobs().jobStatus().values()); object.setBool("deployedInternally", application.deploymentJobs().deployedInternally()); Cursor deploymentsArray = object.setArray("deploymentJobs"); for (JobStatus job : jobStatus) { Cursor jobObject = deploymentsArray.addObject(); jobObject.setString("type", job.type().jobName()); jobObject.setBool("success", job.isSuccess()); job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered"))); job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted"))); job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing"))); job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess"))); } Cursor changeBlockers = object.setArray("changeBlockers"); application.deploymentSpec().changeBlocker().forEach(changeBlocker -> { Cursor changeBlockerObject = changeBlockers.addObject(); changeBlockerObject.setBool("versions", changeBlocker.blocksVersions()); changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions()); changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId()); Cursor days = changeBlockerObject.setArray("days"); changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong); Cursor hours = changeBlockerObject.setArray("hours"); changeBlocker.window().hours().forEach(hours::addLong); }); object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString()); application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion)); Cursor globalRotationsArray = object.setArray("globalRotations"); 
application.globalDnsName(controller.system()).ifPresent(rotation -> { globalRotationsArray.addString(rotation.url().toString()); globalRotationsArray.addString(rotation.secureUrl().toString()); globalRotationsArray.addString(rotation.oathUrl().toString()); object.setString("rotationId", application.rotation().get().asString()); }); Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id()); for (RoutingPolicy policy : routingPolicies) { for (RotationName rotation : policy.rotations()) { GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation); globalRotationsArray.addString(dnsName.oathUrl().toString()); } } List<Deployment> deployments = controller.applications().deploymentTrigger() .steps(application.deploymentSpec()) .sortedDeployments(application.deployments().values()); Cursor instancesArray = object.setArray("instances"); for (Deployment deployment : deployments) { Cursor deploymentObject = instancesArray.addObject(); deploymentObject.setString("environment", deployment.zone().environment().value()); deploymentObject.setString("region", deployment.zone().region().value()); deploymentObject.setString("instance", application.id().instance().value()); if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) { toSlime(application.rotationStatus(deployment), deploymentObject); } if (recurseOverDeployments(request)) toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request); else deploymentObject.setString("url", withPath(request.getUri().getPath() + "/environment/" + deployment.zone().environment().value() + "/region/" + deployment.zone().region().value() + "/instance/" + application.id().instance().value(), request.getUri()).toString()); } Cursor metricsObject = object.setObject("metrics"); metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality()); 
metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality()); Cursor activity = object.setObject("activity"); application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value)); application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value())); application.owner().ifPresent(owner -> object.setString("owner", owner.username())); application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value())); } private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().get(id) .orElseThrow(() -> new NotExistsException(id + " not found")); DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region)); Deployment deployment = application.deployments().get(deploymentId.zoneId()); if (deployment == null) throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId()); Slime slime = new Slime(); toSlime(slime.setObject(), deploymentId, deployment, request); return new SlimeJsonResponse(slime); } private void toSlime(Cursor object, Change change) { change.platform().ifPresent(version -> object.setString("version", version.toString())); change.application() .filter(version -> !version.isUnknown()) .ifPresent(version -> toSlime(version, object.setObject("revision"))); } private void 
toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) { Cursor serviceUrlArray = response.setArray("serviceUrls"); controller.applications().getDeploymentEndpoints(deploymentId) .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString()))); response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString()); controller.zoneRegistry().getLogServerUri(deploymentId) .ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString())); response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString()); response.setString("version", deployment.version().toFullString()); response.setString("revision", deployment.applicationVersion().id()); response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli()); controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId()) .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli())); controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId() .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i))); sourceRevisionToSlime(deployment.applicationVersion().source(), response); Cursor activity = response.setObject("activity"); deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> 
activity.setDouble("lastWritesPerSecond", value)); DeploymentCost appCost = deployment.calculateCost(); Cursor costObject = response.setObject("cost"); toSlime(appCost, costObject); DeploymentMetrics metrics = deployment.metrics(); Cursor metricsObject = response.setObject("metrics"); metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond()); metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond()); metricsObject.setDouble("documentCount", metrics.documentCount()); metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis()); metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli())); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (!applicationVersion.isUnknown()) { object.setString("hash", applicationVersion.id()); sourceRevisionToSlime(applicationVersion.source(), object.setObject("source")); } } private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) { if ( ! 
revision.isPresent()) return;
        object.setString("gitRepository", revision.get().repository());
        object.setString("gitBranch", revision.get().branch());
        object.setString("gitCommit", revision.get().commit());
    }

    /** Writes the global rotation (BCP) status of a deployment to the given cursor. */
    private void toSlime(RotationStatus status, Cursor object) {
        Cursor bcpStatus = object.setObject("bcpStatus");
        // Enum constant names are ASCII, so default-locale upper-casing is safe here.
        bcpStatus.setString("rotationStatus", status.name().toUpperCase());
    }

    /** Returns the monitoring dashboard URI for the given deployment. */
    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }

    /**
     * Takes the given deployment's global rotation in or out of service.
     *
     * The request body must contain a "reason"; the acting agent is the authenticated caller.
     *
     * @throws NotExistsException if the application has no deployment in the given zone
     */
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName,
                                                   String environment, String region, boolean inService,
                                                   HttpRequest request) {
        Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
        ZoneId zone = ZoneId.from(environment, region);
        Deployment deployment = application.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(application + " has no deployment in " + zone);
        }

        Inspector requestData = toSlime(request.getData()).get();
        String reason = mandatory("reason", requestData).asString();
        String agent = getUserPrincipal(request).getIdentity().getFullName();
        // Note: seconds, not millis — the EndpointStatus epoch is second-resolution.
        long timestamp = controller.clock().instant().getEpochSecond();
        EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
        EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp);
        controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()), endpointStatus);

        return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                                 application.id().toShortString(),
                                                 deployment.zone().environment().value(),
                                                 deployment.zone().region().value(),
                                                 inService ?
"in" : "out of")); } private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); Slime slime = new Slime(); Cursor array = slime.setObject().setArray("globalrotationoverride"); Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId); for (RoutingEndpoint endpoint : status.keySet()) { EndpointStatus currentStatus = status.get(endpoint); array.addString(endpoint.upstreamName()); Cursor statusObject = array.addObject(); statusObject.setString("status", currentStatus.getStatus().name()); statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason()); statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent()); statusObject.setLong("timestamp", currentStatus.getEpoch()); } return new SlimeJsonResponse(slime); } private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); Application application = controller.applications().require(applicationId); ZoneId zone = ZoneId.from(environment, region); if (!application.rotation().isPresent()) { throw new NotExistsException("global rotation does not exist for " + application); } Deployment deployment = application.deployments().get(zone); if (deployment == null) { throw new NotExistsException(application + " has no deployment in " + zone); } Slime slime = new Slime(); Cursor response = slime.setObject(); toSlime(application.rotationStatus(deployment), response); return new SlimeJsonResponse(slime); } private HttpResponse deploying(String tenant, String application, HttpRequest request) { Application app = 
controller.applications().require(ApplicationId.from(tenant, application, "default")); Slime slime = new Slime(); Cursor root = slime.setObject(); if (!app.change().isEmpty()) { app.change().platform().ifPresent(version -> root.setString("platform", version.toString())); app.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id())); root.setBool("pinned", app.change().isPinned()); } return new SlimeJsonResponse(slime); } private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, region)); boolean suspended = controller.applications().isSuspended(deploymentId); Slime slime = new Slime(); Cursor response = slime.setObject(); response.setBool("suspended", suspended); return new SlimeJsonResponse(slime); } private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region); ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(applicationView); return response; } private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) { Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath); ServiceApiResponse response = new 
ServiceApiResponse(ZoneId.from(environment, region), new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(), controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)), request.getUri()); response.setResponse(result, serviceName, restPath); return response; } private HttpResponse createUser(HttpRequest request) { Optional<UserId> user = getUserId(request); if ( ! user.isPresent() ) throw new ForbiddenException("Not authenticated or not an user."); String username = UserTenant.normalizeUser(user.get().id()); try { controller.tenants().create(UserTenant.create(username)); return new MessageResponse("Created user '" + username + "'"); } catch (AlreadyExistsException e) { return new MessageResponse("User '" + username + "' already exists"); } } private HttpResponse updateTenant(String tenantName, HttpRequest request) { Optional<AthenzTenant> tenant = controller.tenants().athenzTenant(TenantName.from(tenantName)); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); Inspector requestData = toSlime(request.getData()).get(); OktaAccessToken token = requireOktaAccessToken(request, "Could not update " + tenantName); controller.tenants().lockOrThrow(tenant.get().name(), lockedTenant -> { lockedTenant = lockedTenant.with(new Property(mandatory("property", requestData).asString())); lockedTenant = controller.tenants().withDomain( lockedTenant, new AthenzDomain(mandatory("athensDomain", requestData).asString()), token ); Optional<PropertyId> propertyId = optional("propertyId", requestData).map(PropertyId::new); if (propertyId.isPresent()) { lockedTenant = lockedTenant.with(propertyId.get()); } controller.tenants().store(lockedTenant); }); return tenant(controller.tenants().requireAthenzTenant(tenant.get().name()), request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { Inspector requestData = toSlime(request.getData()).get(); AthenzTenant tenant = AthenzTenant.create(TenantName.from(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotAthenzDomainAdmin(tenant.domain(), request); controller.tenants().create(tenant, requireOktaAccessToken(request, "Could not create " + tenantName)); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), getOktaAccessToken(request)); } catch (ZmsClientException e) { if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, 
slime.setObject(), request); return new SlimeJsonResponse(slime); } /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */ private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) { request = controller.auditLogger().log(request); String versionString = readToString(request.getData()); ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); StringBuilder response = new StringBuilder(); controller.applications().lockOrThrow(id, application -> { Version version = Version.fromString(versionString); if (version.equals(Version.emptyVersion)) version = controller.systemVersion(); if ( ! systemHasVersion(version)) throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " + "Version is not active in this system. " + "Active versions: " + controller.versionStatus().versions() .stream() .map(VespaVersion::versionNumber) .map(Version::toString) .collect(joining(", "))); Change change = Change.of(version); if (pin) change = change.withPin(); controller.applications().deploymentTrigger().forceChange(id, change); response.append("Triggered " + change + " for " + id); }); return new MessageResponse(response.toString()); } /** Trigger deployment to the last known application package for the given application. 
 */
    private HttpResponse deployApplication(String tenantName, String applicationName) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        StringBuilder response = new StringBuilder();
        controller.applications().lockOrThrow(id, application -> {
            // NOTE(review): the chained get() calls assume a successful component run exists for this
            // application; they will throw if it has never built — confirm callers guard against this.
            Change change = Change.of(application.get().deploymentJobs().statusOf(JobType.component).get().lastSuccess().get().application());
            controller.applications().deploymentTrigger().forceChange(id, change);
            response.append("Triggered " + change + " for " + id);
        });
        return new MessageResponse(response.toString());
    }

    /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
    private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        StringBuilder response = new StringBuilder();
        controller.applications().lockOrThrow(id, application -> {
            Change change = application.get().change();
            if (change.isEmpty()) {
                response.append("No deployment in progress for " + application + " at this time");
                return;
            }
            // valueOf throws IllegalArgumentException for unknown choices, surfacing as a 400 to the caller.
            ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
            controller.applications().deploymentTrigger().cancelChange(id, cancel);
            response.append("Changed deployment from '" + change + "' to '" +
                            controller.applications().require(id).change() + "' for " + application);
        });
        return new MessageResponse(response.toString());
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName,
                                 String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     ZoneId.from(environment, region));
        // An optional "hostname" request property restricts the restart to a single host.
        Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
        controller.applications().restart(deploymentId, hostname);
        return new
StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName); ZoneId zone = ZoneId.from(environment, region); Map<String, byte[]> dataParts = new MultipartParser().parse(request); if ( ! dataParts.containsKey("deployOptions")) return ErrorResponse.badRequest("Missing required form part 'deployOptions'"); Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get(); /* * Special handling of the zone application (the only system application with an application package) * Setting any other deployOptions here is not supported for now (e.g. specifying version), but * this might be handy later to handle emergency downgrades. 
*/ boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId); if (isZoneApplication) { String versionStr = deployOptions.field("vespaVersion").asString(); boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null"); if (versionPresent) { throw new RuntimeException("Version not supported for system applications"); } if (controller.versionStatus().isUpgrading()) { throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed"); } Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion(); if (systemVersion.isEmpty()) { throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined"); } ActivateResult result = controller.applications() .deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber()); return new SlimeJsonResponse(toSlime(result)); } /* * Normal applications from here */ Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip")) .map(ApplicationPackage::new); Inspector sourceRevision = deployOptions.field("sourceRevision"); Inspector buildNumber = deployOptions.field("buildNumber"); if (sourceRevision.valid() != buildNumber.valid()) throw new IllegalArgumentException("Source revision and build number must both be provided, or not"); Optional<ApplicationVersion> applicationVersion = Optional.empty(); if (sourceRevision.valid()) { if (applicationPackage.isPresent()) throw new IllegalArgumentException("Application version and application package can't both be provided."); applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision), buildNumber.asLong())); applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get())); } boolean deployDirectly = deployOptions.field("deployDirectly").asBool(); 
Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new); /* * Deploy direct is when we want to redeploy the current application - retrieve version * info from the application package before deploying */ if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) { Optional<Deployment> deployment = controller.applications().get(applicationId) .map(Application::deployments) .flatMap(deployments -> Optional.ofNullable(deployments.get(zone))); if(!deployment.isPresent()) throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist"); ApplicationVersion version = deployment.get().applicationVersion(); if(version.isUnknown()) throw new IllegalArgumentException("Can't redeploy application, application version is unknown"); applicationVersion = Optional.of(version); vespaVersion = Optional.of(deployment.get().version()); applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get())); } DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly, vespaVersion, deployOptions.field("ignoreValidationErrors").asBool(), deployOptions.field("deployCurrentVersion").asBool()); ActivateResult result = controller.applications().deploy(applicationId, zone, applicationPackage, applicationVersion, deployOptionsJsonClass, Optional.of(getUserPrincipal(request).getIdentity())); return new SlimeJsonResponse(toSlime(result)); } private HttpResponse deleteTenant(String tenantName, HttpRequest request) { Optional<Tenant> tenant = controller.tenants().tenant(tenantName); if ( ! 
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); if (tenant.get() instanceof AthenzTenant) { controller.tenants().deleteTenant((AthenzTenant) tenant.get(), requireOktaAccessToken(request, "Could not delete " + tenantName)); } else if (tenant.get() instanceof UserTenant) { controller.tenants().deleteTenant((UserTenant) tenant.get()); } else { throw new IllegalArgumentException("Unknown tenant type:" + tenant.get().getClass().getSimpleName() + ", for " + tenant.get()); } return tenant(tenant.get(), request, false); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); controller.applications().deleteApplication(id, getOktaAccessToken(request)); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); controller.applications().deactivate(application.id(), ZoneId.from(environment, region)); return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */ private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) { try{ ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.systemChefEnvironment(); String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } /** * Promote application Chef environments for jobs that deploy applications */ private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) { try { ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system()); String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName)); String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName)); controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment); return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment)); } catch (Exception e) { log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. 
(%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e); return ErrorResponse.internalServerError("Unable to promote Chef environments for application"); } } private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) { try { DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get()); if ( report.jobType() == JobType.component && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally()) throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " + "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " + "to the old pipeline, please file a ticket at yo/vespa-support and request this."); controller.applications().deploymentTrigger().notifyOfCompletion(report); return new MessageResponse("ok"); } catch (IllegalStateException e) { return ErrorResponse.badRequest(Exceptions.toMessageString(e)); } } private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) { Optional<DeploymentJobs.JobError> jobError = Optional.empty(); if (report.field("jobError").valid()) { jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString())); } ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString()); JobType type = JobType.fromJobName(report.field("jobName").asString()); long buildNumber = report.field("buildNumber").asLong(); if (type == JobType.component) return DeploymentJobs.JobReport.ofComponent(id, report.field("projectId").asLong(), buildNumber, jobError, toSourceRevision(report.field("sourceRevision"))); else return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError); } private static SourceRevision toSourceRevision(Inspector object) { if (!object.field("repository").valid() || !object.field("branch").valid() || 
!object.field("commit").valid()) { throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\"."); } return new SourceRevision(object.field("repository").asString(), object.field("branch").asString(), object.field("commit").asString()); } private Tenant getTenantOrThrow(String tenantName) { return controller.tenants().tenant(tenantName) .orElseThrow(() -> new NotExistsException(new TenantId(tenantName))); } private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) { object.setString("tenant", tenant.name().value()); object.setString("type", tentantType(tenant)); if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; object.setString("athensDomain", athenzTenant.domain().getName()); object.setString("property", athenzTenant.property().id()); athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString())); } Cursor applicationArray = object.setArray("applications"); if (listApplications) { for (Application application : controller.applications().asList(tenant.name())) { if (application.id().instance().isDefault()) { if (recurseOverApplications(request)) toSlime(applicationArray.addObject(), application, request); else toSlime(application, applicationArray.addObject(), request); } } } if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; athenzTenant.contact().ifPresent(c -> { object.setString("propertyUrl", c.propertyUrl().toString()); object.setString("contactsUrl", c.url().toString()); object.setString("issueCreationUrl", c.issueTrackerUrl().toString()); Cursor contactsArray = object.setArray("contacts"); c.persons().forEach(persons -> { Cursor personArray = contactsArray.addArray(); persons.forEach(personArray::addString); }); }); } } private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) { object.setString("tenant", tenant.name().value()); Cursor metaData = 
object.setObject("metaData"); metaData.setString("type", tentantType(tenant)); if (tenant instanceof AthenzTenant) { AthenzTenant athenzTenant = (AthenzTenant) tenant; metaData.setString("athensDomain", athenzTenant.domain().getName()); metaData.setString("property", athenzTenant.property().id()); } object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString()); } /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */ private URI withPath(String newPath, URI uri) { try { return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null); } catch (URISyntaxException e) { throw new RuntimeException("Will not happen", e); } } private long asLong(String valueOrNull, long defaultWhenNull) { if (valueOrNull == null) return defaultWhenNull; try { return Long.parseLong(valueOrNull); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'"); } } private void toSlime(JobStatus.JobRun jobRun, Cursor object) { object.setLong("id", jobRun.id()); object.setString("version", jobRun.platform().toFullString()); if (!jobRun.application().isUnknown()) toSlime(jobRun.application(), object.setObject("revision")); object.setString("reason", jobRun.reason()); object.setLong("at", jobRun.at().toEpochMilli()); } private Slime toSlime(InputStream jsonStream) { try { byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000); return SlimeUtils.jsonToSlime(jsonBytes); } catch (IOException e) { throw new RuntimeException(); } } private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest request) { AthenzIdentity identity = getUserPrincipal(request).getIdentity(); boolean isDomainAdmin = zmsClient.isDomainAdmin(identity, tenantDomain); if ( ! 
isDomainAdmin) {
            throw new ForbiddenException(
                    String.format("The user '%s' is not admin in Athenz domain '%s'", identity.getFullName(), tenantDomain.getName()));
        }
    }

    /** Returns the user id of the authenticated principal, or empty if the principal is not an Athenz user. */
    private static Optional<UserId> getUserId(HttpRequest request) {
        return Optional.of(getUserPrincipal(request))
                .map(AthenzPrincipal::getIdentity)
                .filter(AthenzUser.class::isInstance)
                .map(AthenzUser.class::cast)
                .map(AthenzUser::getName)
                .map(UserId::new);
    }

    /**
     * Returns the authenticated Athenz principal of the request.
     *
     * @throws InternalServerErrorException if the request has no principal, or one of an unexpected type
     */
    private static AthenzPrincipal getUserPrincipal(HttpRequest request) {
        Principal principal = request.getJDiscRequest().getUserPrincipal();
        if (principal == null) throw new InternalServerErrorException("Expected a user principal");
        if (!(principal instanceof AthenzPrincipal))
            throw new InternalServerErrorException(
                    String.format("Expected principal of type %s, got %s",
                                  AthenzPrincipal.class.getSimpleName(), principal.getClass().getName()));
        return (AthenzPrincipal) principal;
    }

    /** Returns the given field of the object, or throws IllegalArgumentException if it is missing. */
    private Inspector mandatory(String key, Inspector object) {
        if ( ! object.field(key).valid())
            throw new IllegalArgumentException("'" + key + "' is missing");
        return object.field(key);
    }

    /** Returns the given string field as an Optional, empty when the field is absent. */
    private Optional<String> optional(String key, Inspector object) {
        return SlimeUtils.optionalString(object.field(key));
    }

    /** Joins the given path elements with '/'. */
    private static String path(Object...
elements) { return Joiner.on("/").join(elements); } private void toSlime(Application application, Cursor object, HttpRequest request) { object.setString("application", application.id().application().value()); object.setString("instance", application.id().instance().value()); object.setString("url", withPath("/application/v4/tenant/" + application.id().tenant().value() + "/application/" + application.id().application().value(), request.getUri()).toString()); } private Slime toSlime(ActivateResult result) { Slime slime = new Slime(); Cursor object = slime.setObject(); object.setString("revisionId", result.revisionId().id()); object.setLong("applicationZipSize", result.applicationZipSizeBytes()); Cursor logArray = object.setArray("prepareMessages"); if (result.prepareResponse().log != null) { for (Log logMessage : result.prepareResponse().log) { Cursor logObject = logArray.addObject(); logObject.setLong("time", logMessage.time); logObject.setString("level", logMessage.level); logObject.setString("message", logMessage.message); } } Cursor changeObject = object.setObject("configChangeActions"); Cursor restartActionsArray = changeObject.setArray("restart"); for (RestartAction restartAction : result.prepareResponse().configChangeActions.restartActions) { Cursor restartActionObject = restartActionsArray.addObject(); restartActionObject.setString("clusterName", restartAction.clusterName); restartActionObject.setString("clusterType", restartAction.clusterType); restartActionObject.setString("serviceType", restartAction.serviceType); serviceInfosToSlime(restartAction.services, restartActionObject.setArray("services")); stringsToSlime(restartAction.messages, restartActionObject.setArray("messages")); } Cursor refeedActionsArray = changeObject.setArray("refeed"); for (RefeedAction refeedAction : result.prepareResponse().configChangeActions.refeedActions) { Cursor refeedActionObject = refeedActionsArray.addObject(); refeedActionObject.setString("name", refeedAction.name); 
refeedActionObject.setBool("allowed", refeedAction.allowed);
            refeedActionObject.setString("documentType", refeedAction.documentType);
            refeedActionObject.setString("clusterName", refeedAction.clusterName);
            serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
            stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
        }
        return slime;
    }

    /** Serializes each service's name, type, config id and host name into the given array. */
    private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
        for (ServiceInfo serviceInfo : serviceInfoList) {
            Cursor serviceInfoObject = array.addObject();
            serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
            serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
            serviceInfoObject.setString("configId", serviceInfo.configId);
            serviceInfoObject.setString("hostName", serviceInfo.hostName);
        }
    }

    /** Adds each string to the given array cursor. */
    private void stringsToSlime(List<String> strings, Cursor array) {
        for (String string : strings)
            array.addString(string);
    }

    /**
     * Reads the entire stream as a single string, or null when the stream is empty.
     *
     * NOTE(review): Scanner uses the platform default charset here — presumably request
     * bodies are UTF-8; confirm and consider passing an explicit charset.
     */
    private String readToString(InputStream stream) {
        Scanner scanner = new Scanner(stream).useDelimiter("\\A");
        if ( !
scanner.hasNext()) return null; return scanner.next(); } private boolean systemHasVersion(Version version) { return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version)); } public static void toSlime(DeploymentCost deploymentCost, Cursor object) { object.setLong("tco", (long)deploymentCost.getTco()); object.setLong("waste", (long)deploymentCost.getWaste()); object.setDouble("utilization", deploymentCost.getUtilization()); Cursor clustersObject = object.setObject("cluster"); for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet()) toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey())); } private static void toSlime(ClusterCost clusterCost, Cursor object) { object.setLong("count", clusterCost.getClusterInfo().getHostnames().size()); object.setString("resource", getResourceName(clusterCost.getResultUtilization())); object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization()); object.setLong("tco", (int)clusterCost.getTco()); object.setLong("waste", (int)clusterCost.getWaste()); object.setString("flavor", clusterCost.getClusterInfo().getFlavor()); object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost()); object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU()); object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem()); object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk()); object.setString("type", clusterCost.getClusterInfo().getClusterType().name()); Cursor utilObject = object.setObject("util"); utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu()); utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory()); utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk()); utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy()); Cursor usageObject = object.setObject("usage"); 
usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu()); usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory()); usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk()); usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy()); Cursor hostnamesArray = object.setArray("hostnames"); for (String hostname : clusterCost.getClusterInfo().getHostnames()) hostnamesArray.addString(hostname); } private static String getResourceName(ClusterUtilization utilization) { String name = "cpu"; double max = utilization.getMaxUtilization(); if (utilization.getMemory() == max) { name = "mem"; } else if (utilization.getDisk() == max) { name = "disk"; } else if (utilization.getDiskBusy() == max) { name = "diskbusy"; } return name; } private static boolean recurseOverTenants(HttpRequest request) { return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive")); } private static boolean recurseOverApplications(HttpRequest request) { return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive")); } private static boolean recurseOverDeployments(HttpRequest request) { return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive")); } private static String tentantType(Tenant tenant) { if (tenant instanceof AthenzTenant) { return "ATHENS"; } else if (tenant instanceof UserTenant) { return "USER"; } throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName()); } private static OktaAccessToken requireOktaAccessToken(HttpRequest request, String message) { return getOktaAccessToken(request) .orElseThrow(() -> new IllegalArgumentException(message + ": No Okta Access Token provided")); } private static Optional<OktaAccessToken> getOktaAccessToken(HttpRequest request) { return Optional.ofNullable(request.getJDiscRequest().context().get("okta.access-token")) .map(attribute -> new 
OktaAccessToken((String) attribute)); } private static ApplicationId appIdFromPath(Path path) { return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance")); } private static JobType jobTypeFromPath(Path path) { return JobType.fromJobName(path.get("jobtype")); } private static RunId runIdFromPath(Path path) { long number = Long.parseLong(path.get("number")); return new RunId(appIdFromPath(path), jobTypeFromPath(path), number); } private HttpResponse submit(String tenant, String application, HttpRequest request) { Map<String, byte[]> dataParts = new MultipartParser().parse(request); Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get(); SourceRevision sourceRevision = toSourceRevision(submitOptions); String authorEmail = submitOptions.field("authorEmail").asString(); long projectId = Math.max(1, submitOptions.field("projectId").asLong()); ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP)); controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant), applicationPackage, Optional.of(getUserPrincipal(request).getIdentity())); return JobControllerApiHandlerHelper.submitResponse(controller.jobController(), tenant, application, sourceRevision, authorEmail, projectId, applicationPackage, dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP)); } }
// Implements the /application/v4 REST API: dispatches first on HTTP method,
// then on URL path, and translates exceptions from the controller layer into
// HTTP error responses.
class ApplicationApiHandler extends LoggingRequestHandler {

    private final Controller controller;
    private final ZmsClientFacade zmsClient;

    @Inject
    public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
                                 Controller controller,
                                 AthenzClientFactory athenzClientFactory) {
        super(parentCtx);
        this.controller = controller;
        this.zmsClient = new ZmsClientFacade(athenzClientFactory.createZmsClient(),
                                             athenzClientFactory.getControllerIdentity());
    }

    /** Deployment-related requests can be slow; allow a generous request timeout. */
    @Override
    public Duration getTimeout() {
        return Duration.ofMinutes(20);
    }

    // Top-level dispatch. Known exception types map to specific HTTP status
    // codes; any other RuntimeException is logged and returned as a 500.
    @Override
    public HttpResponse handle(HttpRequest request) {
        try {
            switch (request.getMethod()) {
                case GET: return handleGET(request);
                case PUT: return handlePUT(request);
                case POST: return handlePOST(request);
                case PATCH: return handlePATCH(request);
                case DELETE: return handleDELETE(request);
                case OPTIONS: return handleOPTIONS();
                default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
            }
        }
        catch (ForbiddenException e) {
            return ErrorResponse.forbidden(Exceptions.toMessageString(e));
        }
        catch (NotAuthorizedException e) {
            return ErrorResponse.unauthorized(Exceptions.toMessageString(e));
        }
        catch (NotExistsException e) {
            return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
        }
        catch (IllegalArgumentException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
        catch (ConfigServerException e) {
            return ErrorResponse.from(e);
        }
        catch (RuntimeException e) {
            log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
            return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
        }
    }

    // GET routing table: first matching pattern wins; unknown paths give 404.
    private HttpResponse handleGET(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/")) return root(request);
        if (path.matches("/application/v4/user")) return authenticatedUser(request);
        if (path.matches("/application/v4/tenant")) return tenants(request);
        if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
        if (path.matches("/application/v4/athensDomain")) return athenzDomains(request);
        if (path.matches("/application/v4/property")) return properties();
        if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), request);
        // NOTE(review): GET /deploying/pin serves the same view as /deploying —
        // presumably intentional since deploying() also renders the pin state; confirm.
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/logs")) return logs(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request.propertyMap());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/suspended")) return suspended(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service")) return services(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/service/{service}/{*}")) return service(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), path.get("service"), path.getRest(), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation")) return rotationStatus(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return getGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // PUT routing table.
    private HttpResponse handlePUT(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if
(path.matches("/application/v4/user")) return createUser(request);
        if (path.matches("/application/v4/tenant/{tenant}")) return updateTenant(path.get("tenant"), request);
        // PUT on the override sets the rotation *out of* service (inService = false).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), false, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // PATCH routing table: only the application-level major-version patch is supported.
    private HttpResponse handlePATCH(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return setMajorVersion(path.get("tenant"), path.get("application"), request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    // DELETE routing table.
    private HttpResponse handleDELETE(HttpRequest request) {
        Path path = new Path(request.getUri().getPath());
        if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"), "all");
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/{choice}")) return cancelDeploy(path.get("tenant"), path.get("application"), path.get("choice"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/submit")) return JobControllerApiHandlerHelper.unregisterResponse(controller.jobController(), path.get("tenant"), path.get("application"));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.abortJobResponse(controller.jobController(), appIdFromPath(path), jobTypeFromPath(path));
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
        // DELETE on the override puts the rotation back *in* service (inService = true).
        if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override")) return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
        return ErrorResponse.notFoundError("Nothing at " + path);
    }

    /** CORS-style preflight: advertises the supported methods. */
    private HttpResponse handleOPTIONS() {
        EmptyJsonResponse response = new EmptyJsonResponse();
        response.headers().put("Allow", "GET,PUT,POST,PATCH,DELETE,OPTIONS");
        return response;
    }

    /** Renders all tenants, recursively including their applications. */
    private HttpResponse recursiveRoot(HttpRequest request) {
        Slime slime = new Slime();
        Cursor tenantArray = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            toSlime(tenantArray.addObject(), tenant, request, true);
        return new SlimeJsonResponse(slime);
    }

    /** API root: either a full recursive listing or just the resource index. */
    private HttpResponse root(HttpRequest request) {
        return recurseOverTenants(request)
                ? recursiveRoot(request)
                : new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain", "property");
    }

    /**
     * Renders the authenticated (or overridden) user and the tenants that user
     * belongs to, plus whether the user's own user-tenant exists.
     */
    private HttpResponse authenticatedUser(HttpRequest request) {
        String userIdString = request.getProperty("userOverride");
        if (userIdString == null)
            userIdString = getUserId(request)
                    .map(UserId::id)
                    .orElseThrow(() -> new ForbiddenException("You must be authenticated or specify userOverride"));
        UserId userId = new UserId(userIdString);
        List<Tenant> tenants = controller.tenants().asList(userId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setString("user", userId.id());
        Cursor tenantsArray = response.setArray("tenants");
        for (Tenant tenant : tenants)
            tenantInTenantsListToSlime(tenant, request.getUri(), tenantsArray.addObject());
        response.setBool("tenantExists", tenants.stream().anyMatch(tenant -> tenant instanceof UserTenant && ((UserTenant) tenant).is(userId.id())));
        return new SlimeJsonResponse(slime);
    }

    /** Lists all tenants in the system. */
    private HttpResponse tenants(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setArray();
        for (Tenant tenant : controller.tenants().asList())
            tenantInTenantsListToSlime(tenant, request.getUri(), response.addObject());
        return new SlimeJsonResponse(slime);
    }

    /** Lists the screwdriver project id for each application */
    private HttpResponse tenantPipelines() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor pipelinesArray = response.setArray("tenantPipelines");
        for (Application application : controller.applications().asList()) {
            if ( !
application.deploymentJobs().projectId().isPresent()) continue; // skip applications without a CI project
            Cursor pipelineObject = pipelinesArray.addObject();
            pipelineObject.setString("screwdriverId", String.valueOf(application.deploymentJobs().projectId().getAsLong()));
            pipelineObject.setString("tenant", application.id().tenant().value());
            pipelineObject.setString("application", application.id().application().value());
            pipelineObject.setString("instance", application.id().instance().value());
        }
        // Always present but always empty — kept for response-schema compatibility.
        response.setArray("brokenTenantPipelines");
        return new SlimeJsonResponse(slime);
    }

    /** Lists Athenz domains, optionally filtered by the 'prefix' request property. */
    private HttpResponse athenzDomains(HttpRequest request) {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("data");
        for (AthenzDomain athenzDomain : controller.getDomainList(request.getProperty("prefix"))) {
            array.addString(athenzDomain.getName());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Lists all known property id / property name pairs. */
    private HttpResponse properties() {
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        Cursor array = response.setArray("properties");
        for (Map.Entry<PropertyId, Property> entry : controller.fetchPropertyList().entrySet()) {
            Cursor propertyObject = array.addObject();
            propertyObject.setString("propertyid", entry.getKey().id());
            propertyObject.setString("property", entry.getValue().id());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Renders a single tenant by name, or 404 when it does not exist. */
    private HttpResponse tenant(String tenantName, HttpRequest request) {
        return controller.tenants().tenant(TenantName.from(tenantName))
                         .map(tenant -> tenant(tenant, request, true))
                         .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"));
    }

    /** Renders the given tenant, optionally including its applications. */
    private HttpResponse tenant(Tenant tenant, HttpRequest request, boolean listApplications) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), tenant, request, listApplications);
        return new SlimeJsonResponse(slime);
    }

    /** Lists all applications of a tenant. */
    private HttpResponse applications(String tenantName, HttpRequest request) {
        TenantName tenant = TenantName.from(tenantName);
        Slime slime = new Slime();
        Cursor array = slime.setArray();
        for (Application application : controller.applications().asList(tenant))
            toSlime(application, array.addObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Renders a single application (the "default" instance). */
    private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
        Slime slime = new Slime();
        toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
        return new SlimeJsonResponse(slime);
    }

    /**
     * PATCH handler: pins (or clears) the application's major version.
     * A majorVersion of 0 in the request body clears the pin (stored as null).
     */
    private HttpResponse setMajorVersion(String tenantName, String applicationName, HttpRequest request) {
        Application application = getApplication(tenantName, applicationName);
        Inspector majorVersionField = toSlime(request.getData()).get().field("majorVersion");
        if ( ! majorVersionField.valid())
            throw new IllegalArgumentException("Request body must contain a majorVersion field");
        Integer majorVersion = majorVersionField.asLong() == 0 ? null : (int)majorVersionField.asLong();
        controller.applications().lockIfPresent(application.id(),
                                                a -> controller.applications().store(a.withMajorVersion(majorVersion)));
        return new MessageResponse("Set major version to " + ( majorVersion == null ? "empty" : majorVersion));
    }

    /** Looks up the "default" instance of the given application, or throws NotExistsException. */
    private Application getApplication(String tenantName, String applicationName) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
        return controller.applications().get(applicationId)
                         .orElseThrow(() -> new NotExistsException(applicationId + " not found"));
    }

    /**
     * Returns deployment logs: streamed raw from the config server when the
     * 'streaming' query parameter is set, otherwise as a JSON object of
     * log-name to content.
     */
    private HttpResponse logs(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
        ApplicationId application = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = ZoneId.from(environment, region);
        DeploymentId deployment = new DeploymentId(application, zone);
        if (queryParameters.containsKey("streaming")) {
            InputStream logStream = controller.configServer().getLogStream(deployment, queryParameters);
            return new HttpResponse(200) {
                @Override
                public void render(OutputStream outputStream) throws IOException {
                    logStream.transferTo(outputStream);
                }
            };
        }
        Optional<Logs> response = controller.configServer().getLogs(deployment, queryParameters);
        Slime slime = new Slime();
        Cursor object = slime.setObject();
        if (response.isPresent()) {
            response.get().logs().entrySet().stream().forEach(entry -> object.setString(entry.getKey(), entry.getValue()));
        }
        return new SlimeJsonResponse(slime);
    }

    /** Force-triggers the given job for the application, reporting which jobs were actually triggered. */
    private HttpResponse trigger(ApplicationId id, JobType type, HttpRequest request) {
        String triggered = controller.applications().deploymentTrigger()
                                     .forceTrigger(id, type, request.getJDiscRequest().getUserPrincipal().getName())
                                     .stream().map(JobType::jobName).collect(joining(", "));
        return new MessageResponse(triggered.isEmpty() ?
"Job " + type.jobName() + " for " + id + " not triggered"
                                   : "Triggered " + triggered + " for " + id);
    }

    /** Pauses the given job for the application for the trigger's maximum pause duration. */
    private HttpResponse pause(ApplicationId id, JobType type) {
        Instant until = controller.clock().instant().plus(DeploymentTrigger.maxPause);
        controller.applications().deploymentTrigger().pauseJob(id, type, until);
        return new MessageResponse(type.jobName() + " for " + id + " paused for " + DeploymentTrigger.maxPause);
    }

    /**
     * Renders the full application view: identity, source revision, changes in
     * progress, job statuses, change blockers, rotations, deployments, metrics,
     * activity and ownership information.
     */
    private void toSlime(Cursor object, Application application, HttpRequest request) {
        object.setString("application", application.id().application().value());
        object.setString("instance", application.id().instance().value());
        object.setString("deployments", withPath("/application/v4" +
                                                 "/tenant/" + application.id().tenant().value() +
                                                 "/application/" + application.id().application().value() +
                                                 "/instance/" + application.id().instance().value() + "/job/",
                                                 request.getUri()).toString());
        // Source of the last successful component build, when known.
        application.deploymentJobs().statusOf(JobType.component)
                   .flatMap(JobStatus::lastSuccess)
                   .map(run -> run.application().source())
                   .ifPresent(source -> sourceRevisionToSlime(source, object.setObject("source")));
        application.deploymentJobs().projectId()
                   .ifPresent(id -> object.setLong("projectId", id));
        // Changes currently rolling out, and those queued behind them.
        if ( ! application.change().isEmpty()) {
            toSlime(object.setObject("deploying"), application.change());
        }
        if ( ! application.outstandingChange().isEmpty()) {
            toSlime(object.setObject("outstandingChange"), application.outstandingChange());
        }
        // Job statuses, in deployment-spec order.
        List<JobStatus> jobStatus = controller.applications().deploymentTrigger()
                                              .steps(application.deploymentSpec())
                                              .sortedJobs(application.deploymentJobs().jobStatus().values());
        object.setBool("deployedInternally", application.deploymentJobs().deployedInternally());
        Cursor deploymentsArray = object.setArray("deploymentJobs");
        for (JobStatus job : jobStatus) {
            Cursor jobObject = deploymentsArray.addObject();
            jobObject.setString("type", job.type().jobName());
            jobObject.setBool("success", job.isSuccess());
            job.lastTriggered().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastTriggered")));
            job.lastCompleted().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastCompleted")));
            job.firstFailing().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("firstFailing")));
            job.lastSuccess().ifPresent(jobRun -> toSlime(jobRun, jobObject.setObject("lastSuccess")));
        }
        // Change blockers from the deployment spec (block windows).
        Cursor changeBlockers = object.setArray("changeBlockers");
        application.deploymentSpec().changeBlocker().forEach(changeBlocker -> {
            Cursor changeBlockerObject = changeBlockers.addObject();
            changeBlockerObject.setBool("versions", changeBlocker.blocksVersions());
            changeBlockerObject.setBool("revisions", changeBlocker.blocksRevisions());
            changeBlockerObject.setString("timeZone", changeBlocker.window().zone().getId());
            Cursor days = changeBlockerObject.setArray("days");
            changeBlocker.window().days().stream().map(DayOfWeek::getValue).forEach(days::addLong);
            Cursor hours = changeBlockerObject.setArray("hours");
            changeBlocker.window().hours().forEach(hours::addLong);
        });
        object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
        application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
        // Global rotation endpoints: legacy global DNS name plus routing-policy rotations.
        Cursor globalRotationsArray = object.setArray("globalRotations");
        application.globalDnsName(controller.system()).ifPresent(rotation -> {
            globalRotationsArray.addString(rotation.url().toString());
            globalRotationsArray.addString(rotation.secureUrl().toString());
            globalRotationsArray.addString(rotation.oathUrl().toString());
            // rotation().get() is safe here only if a global DNS name implies an
            // assigned rotation — presumably guaranteed upstream; verify.
            object.setString("rotationId", application.rotation().get().asString());
        });
        Set<RoutingPolicy> routingPolicies = controller.applications().routingPolicies(application.id());
        for (RoutingPolicy policy : routingPolicies) {
            for (RotationName rotation : policy.rotations()) {
                GlobalDnsName dnsName = new GlobalDnsName(application.id(), controller.system(), rotation);
                globalRotationsArray.addString(dnsName.oathUrl().toString());
            }
        }
        // Deployments ("instances"), in deployment-spec order.
        List<Deployment> deployments = controller.applications().deploymentTrigger()
                                                 .steps(application.deploymentSpec())
                                                 .sortedDeployments(application.deployments().values());
        Cursor instancesArray = object.setArray("instances");
        for (Deployment deployment : deployments) {
            Cursor deploymentObject = instancesArray.addObject();
            deploymentObject.setString("environment", deployment.zone().environment().value());
            deploymentObject.setString("region", deployment.zone().region().value());
            deploymentObject.setString("instance", application.id().instance().value());
            if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
                toSlime(application.rotationStatus(deployment), deploymentObject);
            }
            // Inline the full deployment when recursion is requested; otherwise just link it.
            if (recurseOverDeployments(request))
                toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
            else
                deploymentObject.setString("url", withPath(request.getUri().getPath() +
                                                           "/environment/" + deployment.zone().environment().value() +
                                                           "/region/" + deployment.zone().region().value() +
                                                           "/instance/" + application.id().instance().value(),
                                                           request.getUri()).toString());
        }
        Cursor metricsObject = object.setObject("metrics");
        metricsObject.setDouble("queryServiceQuality", application.metrics().queryServiceQuality());
        metricsObject.setDouble("writeServiceQuality", application.metrics().writeServiceQuality());
        Cursor activity = object.setObject("activity");
        application.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        application.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        application.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
        application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
        application.deploymentJobs().issueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
    }

    /** Renders a single deployment of an application, or 404 when not deployed in that zone. */
    private HttpResponse deployment(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application = controller.applications().get(id)
                                            .orElseThrow(() -> new NotExistsException(id + " not found"));
        DeploymentId deploymentId = new DeploymentId(application.id(), ZoneId.from(environment, region));
        Deployment deployment = application.deployments().get(deploymentId.zoneId());
        if (deployment == null)
            throw new NotExistsException(application + " is not deployed in " + deploymentId.zoneId());
        Slime slime = new Slime();
        toSlime(slime.setObject(), deploymentId, deployment, request);
        return new SlimeJsonResponse(slime);
    }

    /** Renders a change: platform version and/or (known) application revision. */
    private void toSlime(Cursor object, Change change) {
        change.platform().ifPresent(version -> object.setString("version", version.toString()));
        change.application()
              .filter(version -> !version.isUnknown())
              .ifPresent(version -> toSlime(version, object.setObject("revision")));
    }

    private void
toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
        // Service endpoints exposed by this deployment.
        Cursor serviceUrlArray = response.setArray("serviceUrls");
        controller.applications().getDeploymentEndpoints(deploymentId)
                  .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
        // Link to the node repository listing for this application's nodes.
        response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" +
                                             deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" +
                                             deploymentId.applicationId().tenant() + "." +
                                             deploymentId.applicationId().application() + "." +
                                             deploymentId.applicationId().instance(),
                                             request.getUri()).toString());
        controller.zoneRegistry().getLogServerUri(deploymentId)
                  .ifPresent(elkUrl -> response.setString("elkUrl", elkUrl.toString()));
        response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
        response.setString("version", deployment.version().toFullString());
        response.setString("revision", deployment.applicationVersion().id());
        response.setLong("deployTimeEpochMs", deployment.at().toEpochMilli());
        // Expiry only applies to zones with a deployment time-to-live (e.g. dev/test).
        controller.zoneRegistry().getDeploymentTimeToLive(deploymentId.zoneId())
                  .ifPresent(deploymentTimeToLive -> response.setLong("expiryTimeEpochMs", deployment.at().plus(deploymentTimeToLive).toEpochMilli()));
        controller.applications().require(deploymentId.applicationId()).deploymentJobs().projectId()
                  .ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
        sourceRevisionToSlime(deployment.applicationVersion().source(), response);
        Cursor activity = response.setObject("activity");
        deployment.activity().lastQueried().ifPresent(instant -> activity.setLong("lastQueried", instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> activity.setLong("lastWritten", instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> activity.setDouble("lastQueriesPerSecond", value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
        // Cost and serving metrics.
        DeploymentCost appCost = deployment.calculateCost();
        Cursor costObject = response.setObject("cost");
        toSlime(appCost, costObject);
        DeploymentMetrics metrics = deployment.metrics();
        Cursor metricsObject = response.setObject("metrics");
        metricsObject.setDouble("queriesPerSecond", metrics.queriesPerSecond());
        metricsObject.setDouble("writesPerSecond", metrics.writesPerSecond());
        metricsObject.setDouble("documentCount", metrics.documentCount());
        metricsObject.setDouble("queryLatencyMillis", metrics.queryLatencyMillis());
        metricsObject.setDouble("writeLatencyMillis", metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> metricsObject.setLong("lastUpdated", instant.toEpochMilli()));
    }

    /** Renders an application version: its id hash and, when known, its source revision. */
    private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
        if (!applicationVersion.isUnknown()) {
            object.setString("hash", applicationVersion.id());
            sourceRevisionToSlime(applicationVersion.source(), object.setObject("source"));
        }
    }

    /** Renders a source revision (repository, branch, commit) when present; no-op otherwise. */
    private void sourceRevisionToSlime(Optional<SourceRevision> revision, Cursor object) {
        if ( ! revision.isPresent()) return;
        object.setString("gitRepository", revision.get().repository());
        object.setString("gitBranch", revision.get().branch());
        object.setString("gitCommit", revision.get().commit());
    }

    /** Renders the BCP rotation status of a deployment. */
    private void toSlime(RotationStatus status, Cursor object) {
        Cursor bcpStatus = object.setObject("bcpStatus");
        bcpStatus.setString("rotationStatus", status.name().toUpperCase());
    }

    private URI monitoringSystemUri(DeploymentId deploymentId) {
        return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
    }

    /**
     * Sets the global rotation override for a deployment: inService=true puts
     * it back in service, false takes it out. Records who did it, why and when.
     */
    private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
        Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
        ZoneId zone = ZoneId.from(environment, region);
        Deployment deployment = application.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(application + " has no deployment in " + zone);
        }
        // "reason" is mandatory in the request body; agent and timestamp are recorded for auditing.
        Inspector requestData = toSlime(request.getData()).get();
        String reason = mandatory("reason", requestData).asString();
        String agent = getUserPrincipal(request).getIdentity().getFullName();
        long timestamp = controller.clock().instant().getEpochSecond();
        EndpointStatus.Status status = inService ? EndpointStatus.Status.in : EndpointStatus.Status.out;
        EndpointStatus endpointStatus = new EndpointStatus(status, reason, agent, timestamp);
        controller.applications().setGlobalRotationStatus(new DeploymentId(application.id(), deployment.zone()), endpointStatus);
        return new MessageResponse(String.format("Successfully set %s in %s.%s %s service",
                                                 application.id().toShortString(),
                                                 deployment.zone().environment().value(),
                                                 deployment.zone().region().value(),
                                                 inService ?
"in" : "out of"));
    }

    /** Renders the current rotation override status for every routing endpoint of a deployment. */
    private HttpResponse getGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     ZoneId.from(environment, region));
        Slime slime = new Slime();
        // Array alternates endpoint upstream name and its status object.
        Cursor array = slime.setObject().setArray("globalrotationoverride");
        Map<RoutingEndpoint, EndpointStatus> status = controller.applications().globalRotationStatus(deploymentId);
        for (RoutingEndpoint endpoint : status.keySet()) {
            EndpointStatus currentStatus = status.get(endpoint);
            array.addString(endpoint.upstreamName());
            Cursor statusObject = array.addObject();
            statusObject.setString("status", currentStatus.getStatus().name());
            statusObject.setString("reason", currentStatus.getReason() == null ? "" : currentStatus.getReason());
            statusObject.setString("agent", currentStatus.getAgent() == null ? "" : currentStatus.getAgent());
            statusObject.setLong("timestamp", currentStatus.getEpoch());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Renders the BCP rotation status of a specific deployment; 404 when no rotation or no deployment. */
    private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        Application application = controller.applications().require(applicationId);
        ZoneId zone = ZoneId.from(environment, region);
        if (!application.rotation().isPresent()) {
            throw new NotExistsException("global rotation does not exist for " + application);
        }
        Deployment deployment = application.deployments().get(zone);
        if (deployment == null) {
            throw new NotExistsException(application + " has no deployment in " + zone);
        }
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        toSlime(application.rotationStatus(deployment), response);
        return new SlimeJsonResponse(slime);
    }

    /** Renders the change currently rolling out for the application (empty object when none). */
    private HttpResponse deploying(String tenant, String application, HttpRequest request) {
        Application app = controller.applications().require(ApplicationId.from(tenant, application, "default"));
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        if (!app.change().isEmpty()) {
            app.change().platform().ifPresent(version -> root.setString("platform", version.toString()));
            app.change().application().ifPresent(applicationVersion -> root.setString("application", applicationVersion.id()));
            root.setBool("pinned", app.change().isPinned());
        }
        return new SlimeJsonResponse(slime);
    }

    /** Reports whether a deployment is suspended (e.g. for maintenance). */
    private HttpResponse suspended(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     ZoneId.from(environment, region));
        boolean suspended = controller.applications().isSuspended(deploymentId);
        Slime slime = new Slime();
        Cursor response = slime.setObject();
        response.setBool("suspended", suspended);
        return new SlimeJsonResponse(slime);
    }

    /** Lists the services of a deployment, as reported by the config servers. */
    private HttpResponse services(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationView applicationView = controller.getApplicationView(tenantName, applicationName, instanceName, environment, region);
        ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
                                                             new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                             controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)),
                                                             request.getUri());
        response.setResponse(applicationView);
        return response;
    }

    /** Proxies a request for a single service's API (the rest of the path) to the config servers. */
    private HttpResponse service(String tenantName, String applicationName, String instanceName, String environment, String region, String serviceName, String restPath, HttpRequest request) {
        Map<?,?> result = controller.getServiceApiResponse(tenantName, applicationName, instanceName, environment, region, serviceName, restPath);
        ServiceApiResponse response = new ServiceApiResponse(ZoneId.from(environment, region),
                                                             new ApplicationId.Builder().tenant(tenantName).applicationName(applicationName).instanceName(instanceName).build(),
                                                             controller.zoneRegistry().getConfigServerApiUris(ZoneId.from(environment, region)),
                                                             request.getUri());
        response.setResponse(result, serviceName, restPath);
        return response;
    }

    /** Creates the user tenant for the authenticated user; idempotent (reports "already exists"). */
    private HttpResponse createUser(HttpRequest request) {
        Optional<UserId> user = getUserId(request);
        if ( ! user.isPresent() ) throw new ForbiddenException("Not authenticated or not an user.");
        String username = UserTenant.normalizeUser(user.get().id());
        try {
            controller.tenants().create(UserTenant.create(username));
            return new MessageResponse("Created user '" + username + "'");
        } catch (AlreadyExistsException e) {
            return new MessageResponse("User '" + username + "' already exists");
        }
    }

    /** Updates an Athenz tenant's property, domain and optional property id, under a tenant lock. */
    private HttpResponse updateTenant(String tenantName, HttpRequest request) {
        Optional<AthenzTenant> tenant = controller.tenants().athenzTenant(TenantName.from(tenantName));
        if ( !
tenant.isPresent()) return ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist"); Inspector requestData = toSlime(request.getData()).get(); OktaAccessToken token = requireOktaAccessToken(request, "Could not update " + tenantName); controller.tenants().lockOrThrow(tenant.get().name(), lockedTenant -> { lockedTenant = lockedTenant.with(new Property(mandatory("property", requestData).asString())); lockedTenant = controller.tenants().withDomain( lockedTenant, new AthenzDomain(mandatory("athensDomain", requestData).asString()), token ); Optional<PropertyId> propertyId = optional("propertyId", requestData).map(PropertyId::new); if (propertyId.isPresent()) { lockedTenant = lockedTenant.with(propertyId.get()); } controller.tenants().store(lockedTenant); }); return tenant(controller.tenants().requireAthenzTenant(tenant.get().name()), request, true); } private HttpResponse createTenant(String tenantName, HttpRequest request) { Inspector requestData = toSlime(request.getData()).get(); AthenzTenant tenant = AthenzTenant.create(TenantName.from(tenantName), new AthenzDomain(mandatory("athensDomain", requestData).asString()), new Property(mandatory("property", requestData).asString()), optional("propertyId", requestData).map(PropertyId::new)); throwIfNotAthenzDomainAdmin(tenant.domain(), request); controller.tenants().create(tenant, requireOktaAccessToken(request, "Could not create " + tenantName)); return tenant(tenant, request, true); } private HttpResponse createApplication(String tenantName, String applicationName, HttpRequest request) { Application application; try { application = controller.applications().createApplication(ApplicationId.from(tenantName, applicationName, "default"), getOktaAccessToken(request)); } catch (ZmsClientException e) { if (e.getErrorCode() == com.yahoo.jdisc.Response.Status.FORBIDDEN) throw new ForbiddenException("Not authorized to create application", e); else throw e; } Slime slime = new Slime(); toSlime(application, 
slime.setObject(), request);
        return new SlimeJsonResponse(slime);
    }

    /** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
    private HttpResponse deployPlatform(String tenantName, String applicationName, boolean pin, HttpRequest request) {
        request = controller.auditLogger().log(request);
        String versionString = readToString(request.getData());
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        StringBuilder response = new StringBuilder();
        controller.applications().lockOrThrow(id, application -> {
            Version version = Version.fromString(versionString);
            // An empty version in the request body means "deploy the current system version".
            if (version.equals(Version.emptyVersion))
                version = controller.systemVersion();
            if ( ! systemHasVersion(version))
                throw new IllegalArgumentException("Cannot trigger deployment of version '" + version + "': " +
                                                   "Version is not active in this system. " +
                                                   "Active versions: " + controller.versionStatus().versions()
                                                                                   .stream()
                                                                                   .map(VespaVersion::versionNumber)
                                                                                   .map(Version::toString)
                                                                                   .collect(joining(", ")));
            Change change = Change.of(version);
            if (pin)
                change = change.withPin();
            controller.applications().deploymentTrigger().forceChange(id, change);
            response.append("Triggered " + change + " for " + id);
        });
        return new MessageResponse(response.toString());
    }

    /** Trigger deployment to the last known application package for the given application. */
    private HttpResponse deployApplication(String tenantName, String applicationName, HttpRequest request) {
        controller.auditLogger().log(request);
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        StringBuilder response = new StringBuilder();
        controller.applications().lockOrThrow(id, application -> {
            // NOTE(review): the chained .get() calls throw if the component job has never run or never succeeded — confirm intended.
            Change change = Change.of(application.get().deploymentJobs().statusOf(JobType.component).get().lastSuccess().get().application());
            controller.applications().deploymentTrigger().forceChange(id, change);
            response.append("Triggered " + change + " for " + id);
        });
        return new MessageResponse(response.toString());
    }

    /** Cancel ongoing change for given application, e.g., everything with {"cancel":"all"} */
    private HttpResponse cancelDeploy(String tenantName, String applicationName, String choice) {
        ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
        StringBuilder response = new StringBuilder();
        controller.applications().lockOrThrow(id, application -> {
            Change change = application.get().change();
            if (change.isEmpty()) {
                response.append("No deployment in progress for " + application + " at this time");
                return;
            }
            // The path segment ("all", etc.) is mapped case-insensitively to a ChangesToCancel constant.
            ChangesToCancel cancel = ChangesToCancel.valueOf(choice.toUpperCase());
            controller.applications().deploymentTrigger().cancelChange(id, cancel);
            response.append("Changed deployment from '" + change + "' to '" +
                            controller.applications().require(id).change() + "' for " + application);
        });
        return new MessageResponse(response.toString());
    }

    /** Schedule restart of deployment, or specific host in a deployment */
    private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
                                                     ZoneId.from(environment, region));
        // An optional "hostname" query parameter restricts the restart to a single host.
        Optional<Hostname> hostname = Optional.ofNullable(request.getProperty("hostname")).map(Hostname::new);
controller.applications().restart(deploymentId, hostname);
        return new StringResponse("Requested restart of " + path(TenantResource.API_PATH, tenantName,
                                                                 ApplicationResource.API_PATH, applicationName,
                                                                 EnvironmentResource.API_PATH, environment,
                                                                 "region", region,
                                                                 "instance", instanceName));
    }

    /** Deploys an application package (or the zone system application) to the given zone. */
    private HttpResponse deploy(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
        ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
        ZoneId zone = ZoneId.from(environment, region);

        Map<String, byte[]> dataParts = new MultipartParser().parse(request);
        if ( ! dataParts.containsKey("deployOptions"))
            return ErrorResponse.badRequest("Missing required form part 'deployOptions'");

        Inspector deployOptions = SlimeUtils.jsonToSlime(dataParts.get("deployOptions")).get();

        /*
         * Special handling of the zone application (the only system application with an application package)
         * Setting any other deployOptions here is not supported for now (e.g. specifying version), but
         * this might be handy later to handle emergency downgrades.
         */
        boolean isZoneApplication = SystemApplication.zone.id().equals(applicationId);
        if (isZoneApplication) {
            // A "vespaVersion" of "" or "null" counts as absent; anything else is rejected for system applications.
            String versionStr = deployOptions.field("vespaVersion").asString();
            boolean versionPresent = !versionStr.isEmpty() && !versionStr.equals("null");
            if (versionPresent) {
                throw new RuntimeException("Version not supported for system applications");
            }
            // Refuse deployment while the system itself is upgrading or has no determined version.
            if (controller.versionStatus().isUpgrading()) {
                throw new IllegalArgumentException("Deployment of system applications during a system upgrade is not allowed");
            }
            Optional<VespaVersion> systemVersion = controller.versionStatus().systemVersion();
            if (systemVersion.isEmpty()) {
                throw new IllegalArgumentException("Deployment of system applications is not permitted until system version is determined");
            }
            ActivateResult result = controller.applications()
                    .deploySystemApplicationPackage(SystemApplication.zone, zone, systemVersion.get().versionNumber());
            return new SlimeJsonResponse(toSlime(result));
        }

        /*
         * Normal applications from here
         */
        Optional<ApplicationPackage> applicationPackage = Optional.ofNullable(dataParts.get("applicationZip"))
                                                                  .map(ApplicationPackage::new);

        // A source revision and build number identify an already-built package; they must come together,
        // and are mutually exclusive with an uploaded application zip.
        Inspector sourceRevision = deployOptions.field("sourceRevision");
        Inspector buildNumber = deployOptions.field("buildNumber");
        if (sourceRevision.valid() != buildNumber.valid())
            throw new IllegalArgumentException("Source revision and build number must both be provided, or not");

        Optional<ApplicationVersion> applicationVersion = Optional.empty();
        if (sourceRevision.valid()) {
            if (applicationPackage.isPresent())
                throw new IllegalArgumentException("Application version and application package can't both be provided.");
            applicationVersion = Optional.of(ApplicationVersion.from(toSourceRevision(sourceRevision),
                                                                     buildNumber.asLong()));
            applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
        }

        boolean deployDirectly = deployOptions.field("deployDirectly").asBool();
        Optional<Version> vespaVersion = optional("vespaVersion", deployOptions).map(Version::new);

        /*
         * Deploy direct is when we want to redeploy the current application - retrieve version
         * info from the application package before deploying
         */
        if(deployDirectly && !applicationPackage.isPresent() && !applicationVersion.isPresent() && !vespaVersion.isPresent()) {
            // Redeploy: take application and platform versions from the existing deployment in this zone.
            Optional<Deployment> deployment = controller.applications().get(applicationId)
                    .map(Application::deployments)
                    .flatMap(deployments -> Optional.ofNullable(deployments.get(zone)));

            if(!deployment.isPresent())
                throw new IllegalArgumentException("Can't redeploy application, no deployment currently exist");

            ApplicationVersion version = deployment.get().applicationVersion();
            if(version.isUnknown())
                throw new IllegalArgumentException("Can't redeploy application, application version is unknown");

            applicationVersion = Optional.of(version);
            vespaVersion = Optional.of(deployment.get().version());
            applicationPackage = Optional.of(controller.applications().getApplicationPackage(controller.applications().require(applicationId), applicationVersion.get()));
        }

        DeployOptions deployOptionsJsonClass = new DeployOptions(deployDirectly,
                                                                 vespaVersion,
                                                                 deployOptions.field("ignoreValidationErrors").asBool(),
                                                                 deployOptions.field("deployCurrentVersion").asBool());
        ActivateResult result = controller.applications().deploy(applicationId,
                                                                 zone,
                                                                 applicationPackage,
                                                                 applicationVersion,
                                                                 deployOptionsJsonClass,
                                                                 Optional.of(getUserPrincipal(request).getIdentity()));
        return new SlimeJsonResponse(toSlime(result));
    }

    /** Deletes the given tenant; Athenz tenants additionally require an Okta access token. */
    private HttpResponse deleteTenant(String tenantName, HttpRequest request) {
        Optional<Tenant> tenant = controller.tenants().tenant(tenantName);
        if ( !
tenant.isPresent()) return ErrorResponse.notFoundError("Could not delete tenant '" + tenantName + "': Tenant not found"); if (tenant.get() instanceof AthenzTenant) { controller.tenants().deleteTenant((AthenzTenant) tenant.get(), requireOktaAccessToken(request, "Could not delete " + tenantName)); } else if (tenant.get() instanceof UserTenant) { controller.tenants().deleteTenant((UserTenant) tenant.get()); } else { throw new IllegalArgumentException("Unknown tenant type:" + tenant.get().getClass().getSimpleName() + ", for " + tenant.get()); } return tenant(tenant.get(), request, false); } private HttpResponse deleteApplication(String tenantName, String applicationName, HttpRequest request) { ApplicationId id = ApplicationId.from(tenantName, applicationName, "default"); controller.applications().deleteApplication(id, getOktaAccessToken(request)); return new EmptyJsonResponse(); } private HttpResponse deactivate(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) { Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName)); controller.applications().deactivate(application.id(), ZoneId.from(environment, region)); return new StringResponse("Deactivated " + path(TenantResource.API_PATH, tenantName, ApplicationResource.API_PATH, applicationName, EnvironmentResource.API_PATH, environment, "region", region, "instance", instanceName)); } /** * Promote application Chef environments. 
To be used by component jobs only */
    private HttpResponse promoteApplication(String tenantName, String applicationName, HttpRequest request) {
        try{
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String sourceEnvironment = chefEnvironment.systemChefEnvironment();
            String targetEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
            controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
            return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
        } catch (Exception e) {
            // Best effort: log the failure and answer 500 rather than propagating Chef client errors.
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s)", tenantName, applicationName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
    }

    /**
     * Promote application Chef environments for jobs that deploy applications
     */
    private HttpResponse promoteApplicationDeployment(String tenantName, String applicationName, String environmentName, String regionName, String instanceName, HttpRequest request) {
        try {
            ApplicationChefEnvironment chefEnvironment = new ApplicationChefEnvironment(controller.system());
            String sourceEnvironment = chefEnvironment.applicationSourceEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName));
            String targetEnvironment = chefEnvironment.applicationTargetEnvironment(TenantName.from(tenantName), ApplicationName.from(applicationName), Environment.from(environmentName), RegionName.from(regionName));
            controller.chefClient().copyChefEnvironment(sourceEnvironment, targetEnvironment);
            return new MessageResponse(String.format("Successfully copied environment %s to %s", sourceEnvironment, targetEnvironment));
        } catch (Exception e) {
            log.log(LogLevel.ERROR, String.format("Error during Chef copy environment. (%s.%s %s.%s)", tenantName, applicationName, environmentName, regionName), e);
            return ErrorResponse.internalServerError("Unable to promote Chef environments for application");
        }
    }

    /** Receives job completion reports from Screwdriver; rejects component reports for internally deployed applications. */
    private HttpResponse notifyJobCompletion(String tenant, String application, HttpRequest request) {
        try {
            DeploymentJobs.JobReport report = toJobReport(tenant, application, toSlime(request.getData()).get());
            if (   report.jobType() == JobType.component
                && controller.applications().require(report.applicationId()).deploymentJobs().deployedInternally())
                throw new IllegalArgumentException(report.applicationId() + " is set up to be deployed from internally, and no " +
                                                   "longer accepts submissions from Screwdriver v3 jobs. If you need to revert " +
                                                   "to the old pipeline, please file a ticket at yo/vespa-support and request this.");
            controller.applications().deploymentTrigger().notifyOfCompletion(report);
            return new MessageResponse("ok");
        } catch (IllegalStateException e) {
            return ErrorResponse.badRequest(Exceptions.toMessageString(e));
        }
    }

    /** Builds a JobReport from a completion payload; component jobs carry projectId and sourceRevision. */
    private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
        Optional<DeploymentJobs.JobError> jobError = Optional.empty();
        if (report.field("jobError").valid()) {
            jobError = Optional.of(DeploymentJobs.JobError.valueOf(report.field("jobError").asString()));
        }
        ApplicationId id = ApplicationId.from(tenantName, applicationName, report.field("instance").asString());
        JobType type = JobType.fromJobName(report.field("jobName").asString());
        long buildNumber = report.field("buildNumber").asLong();
        if (type == JobType.component)
            return DeploymentJobs.JobReport.ofComponent(id,
                                                        report.field("projectId").asLong(),
                                                        buildNumber,
                                                        jobError,
                                                        toSourceRevision(report.field("sourceRevision")));
        else
            return DeploymentJobs.JobReport.ofJob(id, type, buildNumber, jobError);
    }

    /** Parses a source revision object; "repository", "branch" and "commit" are all required. */
    private static SourceRevision toSourceRevision(Inspector object) {
        if (!object.field("repository").valid() ||
            !object.field("branch").valid() ||
            !object.field("commit").valid()) {
            throw new IllegalArgumentException("Must specify \"repository\", \"branch\", and \"commit\".");
        }
        return new SourceRevision(object.field("repository").asString(),
                                  object.field("branch").asString(),
                                  object.field("commit").asString());
    }

    /** Returns the tenant with the given name, or throws NotExistsException. */
    private Tenant getTenantOrThrow(String tenantName) {
        return controller.tenants().tenant(tenantName)
                         .orElseThrow(() -> new NotExistsException(new TenantId(tenantName)));
    }

    /** Renders a tenant, optionally including its default-instance applications and contact info. */
    private void toSlime(Cursor object, Tenant tenant, HttpRequest request, boolean listApplications) {
        object.setString("tenant", tenant.name().value());
        object.setString("type", tentantType(tenant));
        if (tenant instanceof AthenzTenant) {
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            object.setString("athensDomain", athenzTenant.domain().getName());
            object.setString("property", athenzTenant.property().id());
            athenzTenant.propertyId().ifPresent(id -> object.setString("propertyId", id.toString()));
        }
        Cursor applicationArray = object.setArray("applications");
        if (listApplications) {
            for (Application application : controller.applications().asList(tenant.name())) {
                if (application.id().instance().isDefault()) { // Only the default instance is listed here
                    // With "recursive=application" (or deeper) the full application is rendered; otherwise just a reference.
                    if (recurseOverApplications(request))
                        toSlime(applicationArray.addObject(), application, request);
                    else
                        toSlime(application, applicationArray.addObject(), request);
                }
            }
        }
        if (tenant instanceof AthenzTenant) {
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            athenzTenant.contact().ifPresent(c -> {
                object.setString("propertyUrl", c.propertyUrl().toString());
                object.setString("contactsUrl", c.url().toString());
                object.setString("issueCreationUrl", c.issueTrackerUrl().toString());
                Cursor contactsArray = object.setArray("contacts");
                c.persons().forEach(persons -> {
                    Cursor personArray = contactsArray.addArray();
                    persons.forEach(personArray::addString);
                });
            });
        }
    }

    /** Renders a tenant summary (name, metadata, url) for the tenant list. */
    private void tenantInTenantsListToSlime(Tenant tenant, URI requestURI, Cursor object) {
        object.setString("tenant", tenant.name().value());
        Cursor metaData =
object.setObject("metaData");
        metaData.setString("type", tentantType(tenant));
        if (tenant instanceof AthenzTenant) {
            AthenzTenant athenzTenant = (AthenzTenant) tenant;
            metaData.setString("athensDomain", athenzTenant.domain().getName());
            metaData.setString("property", athenzTenant.property().id());
        }
        object.setString("url", withPath("/application/v4/tenant/" + tenant.name().value(), requestURI).toString());
    }

    /** Returns a copy of the given URI with the host and port from the given URI and the path set to the given path */
    private URI withPath(String newPath, URI uri) {
        try {
            return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), newPath, null, null);
        }
        catch (URISyntaxException e) {
            throw new RuntimeException("Will not happen", e);
        }
    }

    /** Parses the given string as a long, or returns the given default when the string is null. */
    private long asLong(String valueOrNull, long defaultWhenNull) {
        if (valueOrNull == null) return defaultWhenNull;
        try {
            return Long.parseLong(valueOrNull);
        }
        catch (NumberFormatException e) {
            throw new IllegalArgumentException("Expected an integer but got '" + valueOrNull + "'");
        }
    }

    /** Renders a job run: id, platform version, optional revision, reason and timestamp. */
    private void toSlime(JobStatus.JobRun jobRun, Cursor object) {
        object.setLong("id", jobRun.id());
        object.setString("version", jobRun.platform().toFullString());
        if (!jobRun.application().isUnknown())
            toSlime(jobRun.application(), object.setObject("revision"));
        object.setString("reason", jobRun.reason());
        object.setLong("at", jobRun.at().toEpochMilli());
    }

    /** Reads at most 1 MB from the stream and parses it as JSON into a Slime tree. */
    private Slime toSlime(InputStream jsonStream) {
        try {
            byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
            return SlimeUtils.jsonToSlime(jsonBytes);
        } catch (IOException e) {
            // NOTE(review): the cause is dropped here — consider new RuntimeException(e) to aid debugging.
            throw new RuntimeException();
        }
    }

    /** Throws ForbiddenException unless the requesting user is an admin of the given Athenz domain. */
    private void throwIfNotAthenzDomainAdmin(AthenzDomain tenantDomain, HttpRequest request) {
        AthenzIdentity identity = getUserPrincipal(request).getIdentity();
        boolean isDomainAdmin = zmsClient.isDomainAdmin(identity, tenantDomain);
        if ( !
refeedActionObject.setBool("allowed", refeedAction.allowed);
            refeedActionObject.setString("documentType", refeedAction.documentType);
            refeedActionObject.setString("clusterName", refeedAction.clusterName);
            serviceInfosToSlime(refeedAction.services, refeedActionObject.setArray("services"));
            stringsToSlime(refeedAction.messages, refeedActionObject.setArray("messages"));
        }

        return slime;
    }

    /** Renders each service as an object with name, type, config id and host. */
    private void serviceInfosToSlime(List<ServiceInfo> serviceInfoList, Cursor array) {
        for (ServiceInfo serviceInfo : serviceInfoList) {
            Cursor serviceInfoObject = array.addObject();
            serviceInfoObject.setString("serviceName", serviceInfo.serviceName);
            serviceInfoObject.setString("serviceType", serviceInfo.serviceType);
            serviceInfoObject.setString("configId", serviceInfo.configId);
            serviceInfoObject.setString("hostName", serviceInfo.hostName);
        }
    }

    /** Adds each of the given strings to the given array. */
    private void stringsToSlime(List<String> strings, Cursor array) {
        for (String string : strings)
            array.addString(string);
    }

    /**
     * Reads the entire stream as a single string, or returns null when the stream is empty.
     * NOTE(review): Scanner uses the platform default charset here — confirm request bodies are always UTF-8/ASCII.
     */
    private String readToString(InputStream stream) {
        Scanner scanner = new Scanner(stream).useDelimiter("\\A");
        if ( ! scanner.hasNext()) return null;
        return scanner.next();
    }

    /** Returns whether the given version is among the versions known to this system's version status. */
    private boolean systemHasVersion(Version version) {
        return controller.versionStatus().versions().stream().anyMatch(v -> v.versionNumber().equals(version));
    }

    /** Renders deployment cost: tco, waste, utilization and a per-cluster breakdown. */
    public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
        object.setLong("tco", (long)deploymentCost.getTco());
        object.setLong("waste", (long)deploymentCost.getWaste());
        object.setDouble("utilization", deploymentCost.getUtilization());
        Cursor clustersObject = object.setObject("cluster");
        for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
            toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
    }

    /** Renders one cluster's cost: size, dominant resource, flavor data, utilization and raw usage. */
    private static void toSlime(ClusterCost clusterCost, Cursor object) {
        object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
        object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
        object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
        // NOTE(review): tco/waste are cast to int here but to long at the deployment level above — confirm intended.
        object.setLong("tco", (int)clusterCost.getTco());
        object.setLong("waste", (int)clusterCost.getWaste());
        object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
        object.setDouble("flavorCost", clusterCost.getClusterInfo().getFlavorCost());
        object.setDouble("flavorCpu", clusterCost.getClusterInfo().getFlavorCPU());
        object.setDouble("flavorMem", clusterCost.getClusterInfo().getFlavorMem());
        object.setDouble("flavorDisk", clusterCost.getClusterInfo().getFlavorDisk());
        object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
        Cursor utilObject = object.setObject("util");
        utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
        utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
        utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
        utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
        Cursor usageObject = object.setObject("usage");
        usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
        usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
        usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
        usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
        Cursor hostnamesArray = object.setArray("hostnames");
        for (String hostname : clusterCost.getClusterInfo().getHostnames())
            hostnamesArray.addString(hostname);
    }

    /** Returns the name of the most utilized resource: "cpu" (default), "mem", "disk" or "diskbusy". */
    private static String getResourceName(ClusterUtilization utilization) {
        String name = "cpu";
        double max = utilization.getMaxUtilization();
        if (utilization.getMemory() == max) {
            name = "mem";
        } else if (utilization.getDisk() == max) {
            name = "disk";
        } else if (utilization.getDiskBusy() == max) {
            name = "diskbusy";
        }
        return name;
    }

    /** Returns whether the 'recursive' request parameter asks for tenant level or deeper. */
    private static boolean recurseOverTenants(HttpRequest request) {
        return recurseOverApplications(request) || "tenant".equals(request.getProperty("recursive"));
    }

    /** Returns whether the 'recursive' request parameter asks for application level or deeper. */
    private static boolean recurseOverApplications(HttpRequest request) {
        return recurseOverDeployments(request) || "application".equals(request.getProperty("recursive"));
    }

    /** Returns whether the 'recursive' request parameter asks for deployment level. */
    private static boolean recurseOverDeployments(HttpRequest request) {
        return ImmutableSet.of("all", "true", "deployment").contains(request.getProperty("recursive"));
    }

    /** Maps a tenant instance to its API type string. (Method name carries a historical typo.) */
    private static String tentantType(Tenant tenant) {
        if (tenant instanceof AthenzTenant) {
            return "ATHENS";
        } else if (tenant instanceof UserTenant) {
            return "USER";
        }
        throw new IllegalArgumentException("Unknown tenant type: " + tenant.getClass().getSimpleName());
    }

    /** Returns the Okta access token of the request, or throws IllegalArgumentException with the given message. */
    private static OktaAccessToken requireOktaAccessToken(HttpRequest request, String message) {
        return getOktaAccessToken(request)
                .orElseThrow(() -> new IllegalArgumentException(message + ": No Okta Access Token provided"));
    }

    /** Returns the Okta access token from the request context, if one is attached. */
    private static Optional<OktaAccessToken> getOktaAccessToken(HttpRequest request) {
        return Optional.ofNullable(request.getJDiscRequest().context().get("okta.access-token"))
                       .map(attribute -> new OktaAccessToken((String) attribute));
    }

    /** Reads the tenant/application/instance path segments into an ApplicationId. */
    private static ApplicationId appIdFromPath(Path path) {
        return ApplicationId.from(path.get("tenant"), path.get("application"), path.get("instance"));
    }

    /** Reads the jobtype path segment into a JobType. */
    private static JobType jobTypeFromPath(Path path) {
        return JobType.fromJobName(path.get("jobtype"));
    }

    /** Reads application id, job type and run number path segments into a RunId. */
    private static RunId runIdFromPath(Path path) {
        long number = Long.parseLong(path.get("number"));
        return new RunId(appIdFromPath(path), jobTypeFromPath(path), number);
    }

    /** Accepts a submission (application package + test package) for internal build-and-deploy. */
    private HttpResponse submit(String tenant, String application, HttpRequest request) {
        Map<String, byte[]> dataParts = new MultipartParser().parse(request);
        Inspector submitOptions = SlimeUtils.jsonToSlime(dataParts.get(EnvironmentResource.SUBMIT_OPTIONS)).get();
        SourceRevision sourceRevision = toSourceRevision(submitOptions);
        String authorEmail = submitOptions.field("authorEmail").asString();
        // Project id is forced to at least 1 when absent or non-positive in the payload.
        long projectId = Math.max(1, submitOptions.field("projectId").asLong());

        ApplicationPackage applicationPackage = new ApplicationPackage(dataParts.get(EnvironmentResource.APPLICATION_ZIP));
        controller.applications().verifyApplicationIdentityConfiguration(TenantName.from(tenant),
                                                                         applicationPackage,
                                                                         Optional.of(getUserPrincipal(request).getIdentity()));

        return JobControllerApiHandlerHelper.submitResponse(controller.jobController(),
                                                            tenant,
                                                            application,
                                                            sourceRevision,
                                                            authorEmail,
                                                            projectId,
                                                            applicationPackage,
                                                            dataParts.get(EnvironmentResource.APPLICATION_TEST_ZIP));
    }

}
I don't think this is valid; a PEM header needs to be "BEGIN CERTIFICATE" surrounded by five dashes, not three. :-) Let's make sure we have some basic verification of the PEM file on upload, so that we can return a proper error message at that time instead of only failing later on submit.
// Round-trips a fully populated Application through ApplicationSerializer and
// verifies every field survives. The fixture deliberately covers two deployments:
// zone1 minimal, zone2 with cluster info/utilization, metrics and activity.
public void testSerialization() {
    DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("<deployment version='1.0'>" +
                                                           " <staging/>" +
                                                           "</deployment>");
    ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" +
                                                                          " <allow until='2017-06-15'>deployment-removal</allow>" +
                                                                          "</validation-overrides>");
    List<Deployment> deployments = new ArrayList<>();
    ApplicationVersion applicationVersion1 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 31);
    ApplicationVersion applicationVersion2 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 32,
                                                                     "a@b", Version.fromString("6.3.1"), Instant.ofEpochMilli(496));
    Instant activityAt = Instant.parse("2018-06-01T10:15:30.00Z");
    deployments.add(new Deployment(zone1, applicationVersion1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3)));
    deployments.add(new Deployment(zone2, applicationVersion2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5),
                                   createClusterUtils(3, 0.2), createClusterInfo(3, 4),
                                   new DeploymentMetrics(2, 3, 4, 5, 6,
                                                         Optional.of(Instant.now().truncatedTo(ChronoUnit.MILLIS))),
                                   DeploymentActivity.create(Optional.of(activityAt), Optional.of(activityAt),
                                                             OptionalDouble.of(200), OptionalDouble.of(10))));

    OptionalLong projectId = OptionalLong.of(123L);
    List<JobStatus> statusList = new ArrayList<>();

    statusList.add(JobStatus.initial(JobType.systemTest)
                            .withTriggering(Version.fromString("5.6.7"), ApplicationVersion.unknown, empty(), "Test", Instant.ofEpochMilli(7))
                            .withCompletion(30, empty(), Instant.ofEpochMilli(8))
                            .withPause(OptionalLong.of(1L << 32))); // pause value exceeds int range on purpose
    statusList.add(JobStatus.initial(JobType.stagingTest)
                            .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, empty(), "Test 2", Instant.ofEpochMilli(5))
                            .withCompletion(11, Optional.of(JobError.unknown), Instant.ofEpochMilli(6)));
    statusList.add(JobStatus.initial(JobType.from(main, zone1).get())
                            .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, deployments.stream().findFirst(), "Test 3", Instant.ofEpochMilli(6))
                            .withCompletion(11, empty(), Instant.ofEpochMilli(7)));

    DeploymentJobs deploymentJobs = new DeploymentJobs(projectId, statusList, empty(), true);

    Map<HostName, RotationStatus> rotationStatus = new TreeMap<>();
    rotationStatus.put(HostName.from("rot1.fqdn"), RotationStatus.in);
    rotationStatus.put(HostName.from("rot2.fqdn"), RotationStatus.out);

    Application original = new Application(ApplicationId.from("t1", "a1", "i1"),
                                           Instant.now().truncatedTo(ChronoUnit.MILLIS),
                                           deploymentSpec,
                                           validationOverrides,
                                           deployments, deploymentJobs,
                                           Change.of(Version.fromString("6.7")).withPin(),
                                           Change.of(ApplicationVersion.from(new SourceRevision("repo", "master", "deadcafe"), 42)),
                                           Optional.of(IssueId.from("1234")),
                                           Optional.of(User.from("by-username")),
                                           OptionalInt.of(7),
                                           new MetricsService.ApplicationMetrics(0.5, 0.9),
                                           // A syntactically valid PEM public-key block (five dashes, BEGIN/END
                                           // PUBLIC KEY) — the previous "---begin---" placeholder was not valid PEM.
                                           Optional.of("-----BEGIN PUBLIC KEY-----\nKEY\n-----END PUBLIC KEY-----"),
                                           Optional.of(new RotationId("my-rotation")),
                                           rotationStatus);

    Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original));

    assertEquals(original.id(), serialized.id());
    assertEquals(original.createdAt(), serialized.createdAt());
    assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm());
    assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm());

    assertEquals(2, serialized.deployments().size());
    assertEquals(original.deployments().get(zone1).applicationVersion(), serialized.deployments().get(zone1).applicationVersion());
    assertEquals(original.deployments().get(zone2).applicationVersion(), serialized.deployments().get(zone2).applicationVersion());
    assertEquals(original.deployments().get(zone1).version(), serialized.deployments().get(zone1).version());
    assertEquals(original.deployments().get(zone2).version(), serialized.deployments().get(zone2).version());
    assertEquals(original.deployments().get(zone1).at(), serialized.deployments().get(zone1).at());
    assertEquals(original.deployments().get(zone2).at(), serialized.deployments().get(zone2).at());
    assertEquals(original.deployments().get(zone2).activity().lastQueried().get(), serialized.deployments().get(zone2).activity().lastQueried().get());
    assertEquals(original.deployments().get(zone2).activity().lastWritten().get(), serialized.deployments().get(zone2).activity().lastWritten().get());

    assertEquals(original.deploymentJobs().projectId(), serialized.deploymentJobs().projectId());
    assertEquals(original.deploymentJobs().jobStatus().size(), serialized.deploymentJobs().jobStatus().size());
    assertEquals(original.deploymentJobs().jobStatus().get(JobType.systemTest),
                 serialized.deploymentJobs().jobStatus().get(JobType.systemTest));
    assertEquals(original.deploymentJobs().jobStatus().get(JobType.stagingTest),
                 serialized.deploymentJobs().jobStatus().get(JobType.stagingTest));

    assertEquals(original.outstandingChange(), serialized.outstandingChange());
    assertEquals(original.ownershipIssueId(), serialized.ownershipIssueId());
    assertEquals(original.owner(), serialized.owner());
    assertEquals(original.majorVersion(), serialized.majorVersion());
    assertEquals(original.change(), serialized.change());
    assertEquals(original.pemDeployKey(), serialized.pemDeployKey());
    assertEquals(original.rotation().get(), serialized.rotation().get());
    assertEquals(original.rotationStatus(), serialized.rotationStatus());

    // Cluster utilization: zone1 was created without utils, zone2 with 3 entries.
    assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size());
    assertEquals(3, serialized.deployments().get(zone2).clusterUtils().size());
    assertEquals(0.4, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id2")).getCpu(), 0.01);
    assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getCpu(), 0.01);
    assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getMemory(), 0.01);

    // Cluster info, fixture values from createClusterInfo(3, 4).
    assertEquals(3, serialized.deployments().get(zone2).clusterInfo().size());
    assertEquals(10, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCost());
    assertEquals(ClusterSpec.Type.content, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getClusterType());
    assertEquals("flavor2", serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavor());
    assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getHostnames().size());
    assertEquals(2, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCPU(), Double.MIN_VALUE);
    assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorMem(), Double.MIN_VALUE);
    assertEquals(50, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorDisk(), Double.MIN_VALUE);

    assertEquals(original.metrics().queryServiceQuality(), serialized.metrics().queryServiceQuality(), Double.MIN_VALUE);
    assertEquals(original.metrics().writeServiceQuality(), serialized.metrics().writeServiceQuality(), Double.MIN_VALUE);

    assertEquals(original.deployments().get(zone2).metrics().queriesPerSecond(), serialized.deployments().get(zone2).metrics().queriesPerSecond(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().writesPerSecond(), serialized.deployments().get(zone2).metrics().writesPerSecond(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().documentCount(), serialized.deployments().get(zone2).metrics().documentCount(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().queryLatencyMillis(), serialized.deployments().get(zone2).metrics().queryLatencyMillis(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().writeLatencyMillis(), serialized.deployments().get(zone2).metrics().writeLatencyMillis(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().instant(), serialized.deployments().get(zone2).metrics().instant());

    { // Test more change serialization cases
        Application original2 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 42))).get();
        Application serialized2 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original2));
        assertEquals(original2.change(), serialized2.change());
        assertEquals(serialized2.change().application().get().source(), original2.change().application().get().source());

        Application original3 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized3 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original3));
        assertEquals(original3.change(), serialized3.change());
        assertEquals(serialized3.change().application().get().source(), original3.change().application().get().source());

        Application original4 = writable(original).withChange(Change.empty()).get();
        Application serialized4 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original4));
        assertEquals(original4.change(), serialized4.change());

        Application original5 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized5 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original5));
        assertEquals(original5.change(), serialized5.change());

        Application original6 = writable(original).withOutstandingChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized6 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original6));
        assertEquals(original6.outstandingChange(), serialized6.outstandingChange());
    }
}
Optional.of("---begin---\nKEY\n---end---"),
// Round-trips a fully populated Application through ApplicationSerializer and
// verifies every field survives. The fixture deliberately covers two deployments:
// zone1 minimal, zone2 with cluster info/utilization, metrics and activity.
public void testSerialization() {
    DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("<deployment version='1.0'>" +
                                                           " <staging/>" +
                                                           "</deployment>");
    ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" +
                                                                          " <allow until='2017-06-15'>deployment-removal</allow>" +
                                                                          "</validation-overrides>");
    List<Deployment> deployments = new ArrayList<>();
    ApplicationVersion applicationVersion1 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 31);
    ApplicationVersion applicationVersion2 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 32,
                                                                     "a@b", Version.fromString("6.3.1"), Instant.ofEpochMilli(496));
    Instant activityAt = Instant.parse("2018-06-01T10:15:30.00Z");
    deployments.add(new Deployment(zone1, applicationVersion1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3)));
    deployments.add(new Deployment(zone2, applicationVersion2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5),
                                   createClusterUtils(3, 0.2), createClusterInfo(3, 4),
                                   new DeploymentMetrics(2, 3, 4, 5, 6,
                                                         Optional.of(Instant.now().truncatedTo(ChronoUnit.MILLIS))),
                                   DeploymentActivity.create(Optional.of(activityAt), Optional.of(activityAt),
                                                             OptionalDouble.of(200), OptionalDouble.of(10))));

    OptionalLong projectId = OptionalLong.of(123L);
    List<JobStatus> statusList = new ArrayList<>();

    statusList.add(JobStatus.initial(JobType.systemTest)
                            .withTriggering(Version.fromString("5.6.7"), ApplicationVersion.unknown, empty(), "Test", Instant.ofEpochMilli(7))
                            .withCompletion(30, empty(), Instant.ofEpochMilli(8))
                            .withPause(OptionalLong.of(1L << 32))); // pause value exceeds int range on purpose
    statusList.add(JobStatus.initial(JobType.stagingTest)
                            .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, empty(), "Test 2", Instant.ofEpochMilli(5))
                            .withCompletion(11, Optional.of(JobError.unknown), Instant.ofEpochMilli(6)));
    statusList.add(JobStatus.initial(JobType.from(main, zone1).get())
                            .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, deployments.stream().findFirst(), "Test 3", Instant.ofEpochMilli(6))
                            .withCompletion(11, empty(), Instant.ofEpochMilli(7)));

    DeploymentJobs deploymentJobs = new DeploymentJobs(projectId, statusList, empty(), true);

    Map<HostName, RotationStatus> rotationStatus = new TreeMap<>();
    rotationStatus.put(HostName.from("rot1.fqdn"), RotationStatus.in);
    rotationStatus.put(HostName.from("rot2.fqdn"), RotationStatus.out);

    Application original = new Application(ApplicationId.from("t1", "a1", "i1"),
                                           Instant.now().truncatedTo(ChronoUnit.MILLIS),
                                           deploymentSpec,
                                           validationOverrides,
                                           deployments, deploymentJobs,
                                           Change.of(Version.fromString("6.7")).withPin(),
                                           Change.of(ApplicationVersion.from(new SourceRevision("repo", "master", "deadcafe"), 42)),
                                           Optional.of(IssueId.from("1234")),
                                           Optional.of(User.from("by-username")),
                                           OptionalInt.of(7),
                                           new MetricsService.ApplicationMetrics(0.5, 0.9),
                                           // A syntactically valid PEM public-key block (five dashes, BEGIN/END
                                           // PUBLIC KEY) — the previous "---begin---" placeholder was not valid PEM.
                                           Optional.of("-----BEGIN PUBLIC KEY-----\nKEY\n-----END PUBLIC KEY-----"),
                                           Optional.of(new RotationId("my-rotation")),
                                           rotationStatus);

    Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original));

    assertEquals(original.id(), serialized.id());
    assertEquals(original.createdAt(), serialized.createdAt());
    assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm());
    assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm());

    assertEquals(2, serialized.deployments().size());
    assertEquals(original.deployments().get(zone1).applicationVersion(), serialized.deployments().get(zone1).applicationVersion());
    assertEquals(original.deployments().get(zone2).applicationVersion(), serialized.deployments().get(zone2).applicationVersion());
    assertEquals(original.deployments().get(zone1).version(), serialized.deployments().get(zone1).version());
    assertEquals(original.deployments().get(zone2).version(), serialized.deployments().get(zone2).version());
    assertEquals(original.deployments().get(zone1).at(), serialized.deployments().get(zone1).at());
    assertEquals(original.deployments().get(zone2).at(), serialized.deployments().get(zone2).at());
    assertEquals(original.deployments().get(zone2).activity().lastQueried().get(), serialized.deployments().get(zone2).activity().lastQueried().get());
    assertEquals(original.deployments().get(zone2).activity().lastWritten().get(), serialized.deployments().get(zone2).activity().lastWritten().get());

    assertEquals(original.deploymentJobs().projectId(), serialized.deploymentJobs().projectId());
    assertEquals(original.deploymentJobs().jobStatus().size(), serialized.deploymentJobs().jobStatus().size());
    assertEquals(original.deploymentJobs().jobStatus().get(JobType.systemTest),
                 serialized.deploymentJobs().jobStatus().get(JobType.systemTest));
    assertEquals(original.deploymentJobs().jobStatus().get(JobType.stagingTest),
                 serialized.deploymentJobs().jobStatus().get(JobType.stagingTest));

    assertEquals(original.outstandingChange(), serialized.outstandingChange());
    assertEquals(original.ownershipIssueId(), serialized.ownershipIssueId());
    assertEquals(original.owner(), serialized.owner());
    assertEquals(original.majorVersion(), serialized.majorVersion());
    assertEquals(original.change(), serialized.change());
    assertEquals(original.pemDeployKey(), serialized.pemDeployKey());
    assertEquals(original.rotation().get(), serialized.rotation().get());
    assertEquals(original.rotationStatus(), serialized.rotationStatus());

    // Cluster utilization: zone1 was created without utils, zone2 with 3 entries.
    assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size());
    assertEquals(3, serialized.deployments().get(zone2).clusterUtils().size());
    assertEquals(0.4, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id2")).getCpu(), 0.01);
    assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getCpu(), 0.01);
    assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getMemory(), 0.01);

    // Cluster info, fixture values from createClusterInfo(3, 4).
    assertEquals(3, serialized.deployments().get(zone2).clusterInfo().size());
    assertEquals(10, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCost());
    assertEquals(ClusterSpec.Type.content, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getClusterType());
    assertEquals("flavor2", serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavor());
    assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getHostnames().size());
    assertEquals(2, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCPU(), Double.MIN_VALUE);
    assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorMem(), Double.MIN_VALUE);
    assertEquals(50, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorDisk(), Double.MIN_VALUE);

    assertEquals(original.metrics().queryServiceQuality(), serialized.metrics().queryServiceQuality(), Double.MIN_VALUE);
    assertEquals(original.metrics().writeServiceQuality(), serialized.metrics().writeServiceQuality(), Double.MIN_VALUE);

    assertEquals(original.deployments().get(zone2).metrics().queriesPerSecond(), serialized.deployments().get(zone2).metrics().queriesPerSecond(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().writesPerSecond(), serialized.deployments().get(zone2).metrics().writesPerSecond(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().documentCount(), serialized.deployments().get(zone2).metrics().documentCount(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().queryLatencyMillis(), serialized.deployments().get(zone2).metrics().queryLatencyMillis(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().writeLatencyMillis(), serialized.deployments().get(zone2).metrics().writeLatencyMillis(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().instant(), serialized.deployments().get(zone2).metrics().instant());

    { // Test more change serialization cases
        Application original2 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 42))).get();
        Application serialized2 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original2));
        assertEquals(original2.change(), serialized2.change());
        assertEquals(serialized2.change().application().get().source(), original2.change().application().get().source());

        Application original3 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized3 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original3));
        assertEquals(original3.change(), serialized3.change());
        assertEquals(serialized3.change().application().get().source(), original3.change().application().get().source());

        Application original4 = writable(original).withChange(Change.empty()).get();
        Application serialized4 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original4));
        assertEquals(original4.change(), serialized4.change());

        Application original5 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized5 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original5));
        assertEquals(original5.change(), serialized5.change());

        Application original6 = writable(original).withOutstandingChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized6 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original6));
        assertEquals(original6.outstandingChange(), serialized6.outstandingChange());
    }
}
// Unit tests for ApplicationSerializer. The private helpers build deterministic
// cluster fixtures; the deserialization test reads a complete JSON sample from disk.
class ApplicationSerializerTest {

    private static final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
    private static final Path testData = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/");
    private static final ZoneId zone1 = ZoneId.from("prod", "us-west-1");
    private static final ZoneId zone2 = ZoneId.from("prod", "us-east-3");

    // Builds `clusters` ClusterInfo entries keyed "id0".."idN", each with `hosts` hostnames.
    // NOTE(review): removed a stray @Test annotation that sat on this private,
    // Map-returning helper — JUnit 4 requires test methods to be public void, so
    // the annotation would make the whole class fail with an initialization error.
    private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) {
        Map<ClusterSpec.Id, ClusterInfo> result = new HashMap<>();
        for (int cluster = 0; cluster < clusters; cluster++) {
            List<String> hostnames = new ArrayList<>();
            for (int host = 0; host < hosts; host++) {
                hostnames.add("hostname" + cluster * host + host);
            }
            result.put(ClusterSpec.Id.from("id" + cluster),
                       new ClusterInfo("flavor" + cluster, 10, 2, 4, 50, ClusterSpec.Type.content, hostnames));
        }
        return result;
    }

    // Builds `clusters` ClusterUtilization entries whose four metrics all equal cluster * inc.
    private Map<ClusterSpec.Id, ClusterUtilization> createClusterUtils(int clusters, double inc) {
        Map<ClusterSpec.Id, ClusterUtilization> result = new HashMap<>();
        ClusterUtilization util = new ClusterUtilization(0, 0, 0, 0);
        for (int cluster = 0; cluster < clusters; cluster++) {
            double agg = cluster * inc;
            result.put(ClusterSpec.Id.from("id" + cluster),
                       new ClusterUtilization(util.getMemory() + agg, util.getCpu() + agg,
                                              util.getDisk() + agg, util.getDiskBusy() + agg));
        }
        return result;
    }

    @Test
    public void testCompleteApplicationDeserialization() throws Exception {
        byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-application.json"));
        applicationSerializer.fromSlime(SlimeUtils.jsonToSlime(applicationJson));
    }

}
// Unit tests for ApplicationSerializer. The private helpers build deterministic
// cluster fixtures; the deserialization test reads a complete JSON sample from disk.
class ApplicationSerializerTest {

    private static final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
    private static final Path testData = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/");
    private static final ZoneId zone1 = ZoneId.from("prod", "us-west-1");
    private static final ZoneId zone2 = ZoneId.from("prod", "us-east-3");

    // Builds `clusters` ClusterInfo entries keyed "id0".."idN", each with `hosts` hostnames.
    // NOTE(review): removed a stray @Test annotation that sat on this private,
    // Map-returning helper — JUnit 4 requires test methods to be public void, so
    // the annotation would make the whole class fail with an initialization error.
    private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) {
        Map<ClusterSpec.Id, ClusterInfo> result = new HashMap<>();
        for (int cluster = 0; cluster < clusters; cluster++) {
            List<String> hostnames = new ArrayList<>();
            for (int host = 0; host < hosts; host++) {
                hostnames.add("hostname" + cluster * host + host);
            }
            result.put(ClusterSpec.Id.from("id" + cluster),
                       new ClusterInfo("flavor" + cluster, 10, 2, 4, 50, ClusterSpec.Type.content, hostnames));
        }
        return result;
    }

    // Builds `clusters` ClusterUtilization entries whose four metrics all equal cluster * inc.
    private Map<ClusterSpec.Id, ClusterUtilization> createClusterUtils(int clusters, double inc) {
        Map<ClusterSpec.Id, ClusterUtilization> result = new HashMap<>();
        ClusterUtilization util = new ClusterUtilization(0, 0, 0, 0);
        for (int cluster = 0; cluster < clusters; cluster++) {
            double agg = cluster * inc;
            result.put(ClusterSpec.Id.from("id" + cluster),
                       new ClusterUtilization(util.getMemory() + agg, util.getCpu() + agg,
                                              util.getDisk() + agg, util.getDiskBusy() + agg));
        }
        return result;
    }

    @Test
    public void testCompleteApplicationDeserialization() throws Exception {
        byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-application.json"));
        applicationSerializer.fromSlime(SlimeUtils.jsonToSlime(applicationJson));
    }

}
Since this is for a public key in PEM format, it should be: -----BEGIN PUBLIC KEY----- KEY.... -----END PUBLIC KEY-----
// Round-trips a fully populated Application through ApplicationSerializer and
// verifies every field survives. The fixture deliberately covers two deployments:
// zone1 minimal, zone2 with cluster info/utilization, metrics and activity.
public void testSerialization() {
    DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("<deployment version='1.0'>" +
                                                           " <staging/>" +
                                                           "</deployment>");
    ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" +
                                                                          " <allow until='2017-06-15'>deployment-removal</allow>" +
                                                                          "</validation-overrides>");
    List<Deployment> deployments = new ArrayList<>();
    ApplicationVersion applicationVersion1 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 31);
    ApplicationVersion applicationVersion2 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 32,
                                                                     "a@b", Version.fromString("6.3.1"), Instant.ofEpochMilli(496));
    Instant activityAt = Instant.parse("2018-06-01T10:15:30.00Z");
    deployments.add(new Deployment(zone1, applicationVersion1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3)));
    deployments.add(new Deployment(zone2, applicationVersion2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5),
                                   createClusterUtils(3, 0.2), createClusterInfo(3, 4),
                                   new DeploymentMetrics(2, 3, 4, 5, 6,
                                                         Optional.of(Instant.now().truncatedTo(ChronoUnit.MILLIS))),
                                   DeploymentActivity.create(Optional.of(activityAt), Optional.of(activityAt),
                                                             OptionalDouble.of(200), OptionalDouble.of(10))));

    OptionalLong projectId = OptionalLong.of(123L);
    List<JobStatus> statusList = new ArrayList<>();

    statusList.add(JobStatus.initial(JobType.systemTest)
                            .withTriggering(Version.fromString("5.6.7"), ApplicationVersion.unknown, empty(), "Test", Instant.ofEpochMilli(7))
                            .withCompletion(30, empty(), Instant.ofEpochMilli(8))
                            .withPause(OptionalLong.of(1L << 32))); // pause value exceeds int range on purpose
    statusList.add(JobStatus.initial(JobType.stagingTest)
                            .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, empty(), "Test 2", Instant.ofEpochMilli(5))
                            .withCompletion(11, Optional.of(JobError.unknown), Instant.ofEpochMilli(6)));
    statusList.add(JobStatus.initial(JobType.from(main, zone1).get())
                            .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, deployments.stream().findFirst(), "Test 3", Instant.ofEpochMilli(6))
                            .withCompletion(11, empty(), Instant.ofEpochMilli(7)));

    DeploymentJobs deploymentJobs = new DeploymentJobs(projectId, statusList, empty(), true);

    Map<HostName, RotationStatus> rotationStatus = new TreeMap<>();
    rotationStatus.put(HostName.from("rot1.fqdn"), RotationStatus.in);
    rotationStatus.put(HostName.from("rot2.fqdn"), RotationStatus.out);

    Application original = new Application(ApplicationId.from("t1", "a1", "i1"),
                                           Instant.now().truncatedTo(ChronoUnit.MILLIS),
                                           deploymentSpec,
                                           validationOverrides,
                                           deployments, deploymentJobs,
                                           Change.of(Version.fromString("6.7")).withPin(),
                                           Change.of(ApplicationVersion.from(new SourceRevision("repo", "master", "deadcafe"), 42)),
                                           Optional.of(IssueId.from("1234")),
                                           Optional.of(User.from("by-username")),
                                           OptionalInt.of(7),
                                           new MetricsService.ApplicationMetrics(0.5, 0.9),
                                           // A syntactically valid PEM public-key block (five dashes, BEGIN/END
                                           // PUBLIC KEY) — the previous "---begin---" placeholder was not valid PEM.
                                           Optional.of("-----BEGIN PUBLIC KEY-----\nKEY\n-----END PUBLIC KEY-----"),
                                           Optional.of(new RotationId("my-rotation")),
                                           rotationStatus);

    Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original));

    assertEquals(original.id(), serialized.id());
    assertEquals(original.createdAt(), serialized.createdAt());
    assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm());
    assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm());

    assertEquals(2, serialized.deployments().size());
    assertEquals(original.deployments().get(zone1).applicationVersion(), serialized.deployments().get(zone1).applicationVersion());
    assertEquals(original.deployments().get(zone2).applicationVersion(), serialized.deployments().get(zone2).applicationVersion());
    assertEquals(original.deployments().get(zone1).version(), serialized.deployments().get(zone1).version());
    assertEquals(original.deployments().get(zone2).version(), serialized.deployments().get(zone2).version());
    assertEquals(original.deployments().get(zone1).at(), serialized.deployments().get(zone1).at());
    assertEquals(original.deployments().get(zone2).at(), serialized.deployments().get(zone2).at());
    assertEquals(original.deployments().get(zone2).activity().lastQueried().get(), serialized.deployments().get(zone2).activity().lastQueried().get());
    assertEquals(original.deployments().get(zone2).activity().lastWritten().get(), serialized.deployments().get(zone2).activity().lastWritten().get());

    assertEquals(original.deploymentJobs().projectId(), serialized.deploymentJobs().projectId());
    assertEquals(original.deploymentJobs().jobStatus().size(), serialized.deploymentJobs().jobStatus().size());
    assertEquals(original.deploymentJobs().jobStatus().get(JobType.systemTest),
                 serialized.deploymentJobs().jobStatus().get(JobType.systemTest));
    assertEquals(original.deploymentJobs().jobStatus().get(JobType.stagingTest),
                 serialized.deploymentJobs().jobStatus().get(JobType.stagingTest));

    assertEquals(original.outstandingChange(), serialized.outstandingChange());
    assertEquals(original.ownershipIssueId(), serialized.ownershipIssueId());
    assertEquals(original.owner(), serialized.owner());
    assertEquals(original.majorVersion(), serialized.majorVersion());
    assertEquals(original.change(), serialized.change());
    assertEquals(original.pemDeployKey(), serialized.pemDeployKey());
    assertEquals(original.rotation().get(), serialized.rotation().get());
    assertEquals(original.rotationStatus(), serialized.rotationStatus());

    // Cluster utilization: zone1 was created without utils, zone2 with 3 entries.
    assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size());
    assertEquals(3, serialized.deployments().get(zone2).clusterUtils().size());
    assertEquals(0.4, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id2")).getCpu(), 0.01);
    assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getCpu(), 0.01);
    assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getMemory(), 0.01);

    // Cluster info, fixture values from createClusterInfo(3, 4).
    assertEquals(3, serialized.deployments().get(zone2).clusterInfo().size());
    assertEquals(10, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCost());
    assertEquals(ClusterSpec.Type.content, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getClusterType());
    assertEquals("flavor2", serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavor());
    assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getHostnames().size());
    assertEquals(2, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCPU(), Double.MIN_VALUE);
    assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorMem(), Double.MIN_VALUE);
    assertEquals(50, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorDisk(), Double.MIN_VALUE);

    assertEquals(original.metrics().queryServiceQuality(), serialized.metrics().queryServiceQuality(), Double.MIN_VALUE);
    assertEquals(original.metrics().writeServiceQuality(), serialized.metrics().writeServiceQuality(), Double.MIN_VALUE);

    assertEquals(original.deployments().get(zone2).metrics().queriesPerSecond(), serialized.deployments().get(zone2).metrics().queriesPerSecond(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().writesPerSecond(), serialized.deployments().get(zone2).metrics().writesPerSecond(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().documentCount(), serialized.deployments().get(zone2).metrics().documentCount(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().queryLatencyMillis(), serialized.deployments().get(zone2).metrics().queryLatencyMillis(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().writeLatencyMillis(), serialized.deployments().get(zone2).metrics().writeLatencyMillis(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().instant(), serialized.deployments().get(zone2).metrics().instant());

    { // Test more change serialization cases
        Application original2 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 42))).get();
        Application serialized2 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original2));
        assertEquals(original2.change(), serialized2.change());
        assertEquals(serialized2.change().application().get().source(), original2.change().application().get().source());

        Application original3 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized3 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original3));
        assertEquals(original3.change(), serialized3.change());
        assertEquals(serialized3.change().application().get().source(), original3.change().application().get().source());

        Application original4 = writable(original).withChange(Change.empty()).get();
        Application serialized4 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original4));
        assertEquals(original4.change(), serialized4.change());

        Application original5 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized5 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original5));
        assertEquals(original5.change(), serialized5.change());

        Application original6 = writable(original).withOutstandingChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized6 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original6));
        assertEquals(original6.outstandingChange(), serialized6.outstandingChange());
    }
}
Optional.of("---begin---\nKEY\n---end---"),
public void testSerialization() { DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("<deployment version='1.0'>" + " <staging/>" + "</deployment>"); ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" + " <allow until='2017-06-15'>deployment-removal</allow>" + "</validation-overrides>"); List<Deployment> deployments = new ArrayList<>(); ApplicationVersion applicationVersion1 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 31); ApplicationVersion applicationVersion2 = ApplicationVersion .from(new SourceRevision("repo1", "branch1", "commit1"), 32, "a@b", Version.fromString("6.3.1"), Instant.ofEpochMilli(496)); Instant activityAt = Instant.parse("2018-06-01T10:15:30.00Z"); deployments.add(new Deployment(zone1, applicationVersion1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3))); deployments.add(new Deployment(zone2, applicationVersion2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5), createClusterUtils(3, 0.2), createClusterInfo(3, 4), new DeploymentMetrics(2, 3, 4, 5, 6, Optional.of(Instant.now().truncatedTo(ChronoUnit.MILLIS))), DeploymentActivity.create(Optional.of(activityAt), Optional.of(activityAt), OptionalDouble.of(200), OptionalDouble.of(10)))); OptionalLong projectId = OptionalLong.of(123L); List<JobStatus> statusList = new ArrayList<>(); statusList.add(JobStatus.initial(JobType.systemTest) .withTriggering(Version.fromString("5.6.7"), ApplicationVersion.unknown, empty(), "Test", Instant.ofEpochMilli(7)) .withCompletion(30, empty(), Instant.ofEpochMilli(8)) .withPause(OptionalLong.of(1L << 32))); statusList.add(JobStatus.initial(JobType.stagingTest) .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, empty(), "Test 2", Instant.ofEpochMilli(5)) .withCompletion(11, Optional.of(JobError.unknown), Instant.ofEpochMilli(6))); statusList.add(JobStatus.initial(JobType.from(main, zone1).get()) .withTriggering(Version.fromString("5.6.6"), 
ApplicationVersion.unknown, deployments.stream().findFirst(), "Test 3", Instant.ofEpochMilli(6)) .withCompletion(11, empty(), Instant.ofEpochMilli(7))); DeploymentJobs deploymentJobs = new DeploymentJobs(projectId, statusList, empty(), true); Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); rotationStatus.put(HostName.from("rot1.fqdn"), RotationStatus.in); rotationStatus.put(HostName.from("rot2.fqdn"), RotationStatus.out); Application original = new Application(ApplicationId.from("t1", "a1", "i1"), Instant.now().truncatedTo(ChronoUnit.MILLIS), deploymentSpec, validationOverrides, deployments, deploymentJobs, Change.of(Version.fromString("6.7")).withPin(), Change.of(ApplicationVersion.from(new SourceRevision("repo", "master", "deadcafe"), 42)), Optional.of(IssueId.from("1234")), Optional.of(User.from("by-username")), OptionalInt.of(7), new MetricsService.ApplicationMetrics(0.5, 0.9), Optional.of("---begin---\nKEY\n---end---"), Optional.of(new RotationId("my-rotation")), rotationStatus); Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original)); assertEquals(original.id(), serialized.id()); assertEquals(original.createdAt(), serialized.createdAt()); assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm()); assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm()); assertEquals(2, serialized.deployments().size()); assertEquals(original.deployments().get(zone1).applicationVersion(), serialized.deployments().get(zone1).applicationVersion()); assertEquals(original.deployments().get(zone2).applicationVersion(), serialized.deployments().get(zone2).applicationVersion()); assertEquals(original.deployments().get(zone1).version(), serialized.deployments().get(zone1).version()); assertEquals(original.deployments().get(zone2).version(), serialized.deployments().get(zone2).version()); assertEquals(original.deployments().get(zone1).at(), 
serialized.deployments().get(zone1).at()); assertEquals(original.deployments().get(zone2).at(), serialized.deployments().get(zone2).at()); assertEquals(original.deployments().get(zone2).activity().lastQueried().get(), serialized.deployments().get(zone2).activity().lastQueried().get()); assertEquals(original.deployments().get(zone2).activity().lastWritten().get(), serialized.deployments().get(zone2).activity().lastWritten().get()); assertEquals(original.deploymentJobs().projectId(), serialized.deploymentJobs().projectId()); assertEquals(original.deploymentJobs().jobStatus().size(), serialized.deploymentJobs().jobStatus().size()); assertEquals( original.deploymentJobs().jobStatus().get(JobType.systemTest), serialized.deploymentJobs().jobStatus().get(JobType.systemTest)); assertEquals( original.deploymentJobs().jobStatus().get(JobType.stagingTest), serialized.deploymentJobs().jobStatus().get(JobType.stagingTest)); assertEquals(original.outstandingChange(), serialized.outstandingChange()); assertEquals(original.ownershipIssueId(), serialized.ownershipIssueId()); assertEquals(original.owner(), serialized.owner()); assertEquals(original.majorVersion(), serialized.majorVersion()); assertEquals(original.change(), serialized.change()); assertEquals(original.pemDeployKey(), serialized.pemDeployKey()); assertEquals(original.rotation().get(), serialized.rotation().get()); assertEquals(original.rotationStatus(), serialized.rotationStatus()); assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size()); assertEquals(3, serialized.deployments().get(zone2).clusterUtils().size()); assertEquals(0.4, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id2")).getCpu(), 0.01); assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getCpu(), 0.01); assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getMemory(), 0.01); assertEquals(3, 
serialized.deployments().get(zone2).clusterInfo().size()); assertEquals(10, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCost()); assertEquals(ClusterSpec.Type.content, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getClusterType()); assertEquals("flavor2", serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavor()); assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getHostnames().size()); assertEquals(2, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCPU(), Double.MIN_VALUE); assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorMem(), Double.MIN_VALUE); assertEquals(50, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorDisk(), Double.MIN_VALUE); assertEquals(original.metrics().queryServiceQuality(), serialized.metrics().queryServiceQuality(), Double.MIN_VALUE); assertEquals(original.metrics().writeServiceQuality(), serialized.metrics().writeServiceQuality(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().queriesPerSecond(), serialized.deployments().get(zone2).metrics().queriesPerSecond(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().writesPerSecond(), serialized.deployments().get(zone2).metrics().writesPerSecond(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().documentCount(), serialized.deployments().get(zone2).metrics().documentCount(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().queryLatencyMillis(), serialized.deployments().get(zone2).metrics().queryLatencyMillis(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().writeLatencyMillis(), serialized.deployments().get(zone2).metrics().writeLatencyMillis(), 
Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().instant(), serialized.deployments().get(zone2).metrics().instant()); { Application original2 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 42))).get(); Application serialized2 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original2)); assertEquals(original2.change(), serialized2.change()); assertEquals(serialized2.change().application().get().source(), original2.change().application().get().source()); Application original3 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized3 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original3)); assertEquals(original3.change(), serialized3.change()); assertEquals(serialized3.change().application().get().source(), original3.change().application().get().source()); Application original4 = writable(original).withChange(Change.empty()).get(); Application serialized4 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original4)); assertEquals(original4.change(), serialized4.change()); Application original5 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized5 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original5)); assertEquals(original5.change(), serialized5.change()); Application original6 = writable(original).withOutstandingChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized6 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original6)); assertEquals(original6.outstandingChange(), serialized6.outstandingChange()); } }
class ApplicationSerializerTest { private static final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private static final Path testData = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/"); private static final ZoneId zone1 = ZoneId.from("prod", "us-west-1"); private static final ZoneId zone2 = ZoneId.from("prod", "us-east-3"); @Test private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) { Map<ClusterSpec.Id, ClusterInfo> result = new HashMap<>(); for (int cluster = 0; cluster < clusters; cluster++) { List<String> hostnames = new ArrayList<>(); for (int host = 0; host < hosts; host++) { hostnames.add("hostname" + cluster*host + host); } result.put(ClusterSpec.Id.from("id" + cluster), new ClusterInfo("flavor" + cluster, 10, 2, 4, 50, ClusterSpec.Type.content, hostnames)); } return result; } private Map<ClusterSpec.Id, ClusterUtilization> createClusterUtils(int clusters, double inc) { Map<ClusterSpec.Id, ClusterUtilization> result = new HashMap<>(); ClusterUtilization util = new ClusterUtilization(0,0,0,0); for (int cluster = 0; cluster < clusters; cluster++) { double agg = cluster*inc; result.put(ClusterSpec.Id.from("id" + cluster), new ClusterUtilization( util.getMemory()+ agg, util.getCpu()+ agg, util.getDisk() + agg, util.getDiskBusy() + agg)); } return result; } @Test public void testCompleteApplicationDeserialization() throws Exception { byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-application.json")); applicationSerializer.fromSlime(SlimeUtils.jsonToSlime(applicationJson)); } }
class ApplicationSerializerTest { private static final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private static final Path testData = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/"); private static final ZoneId zone1 = ZoneId.from("prod", "us-west-1"); private static final ZoneId zone2 = ZoneId.from("prod", "us-east-3"); @Test private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) { Map<ClusterSpec.Id, ClusterInfo> result = new HashMap<>(); for (int cluster = 0; cluster < clusters; cluster++) { List<String> hostnames = new ArrayList<>(); for (int host = 0; host < hosts; host++) { hostnames.add("hostname" + cluster*host + host); } result.put(ClusterSpec.Id.from("id" + cluster), new ClusterInfo("flavor" + cluster, 10, 2, 4, 50, ClusterSpec.Type.content, hostnames)); } return result; } private Map<ClusterSpec.Id, ClusterUtilization> createClusterUtils(int clusters, double inc) { Map<ClusterSpec.Id, ClusterUtilization> result = new HashMap<>(); ClusterUtilization util = new ClusterUtilization(0,0,0,0); for (int cluster = 0; cluster < clusters; cluster++) { double agg = cluster*inc; result.put(ClusterSpec.Id.from("id" + cluster), new ClusterUtilization( util.getMemory()+ agg, util.getCpu()+ agg, util.getDisk() + agg, util.getDiskBusy() + agg)); } return result; } @Test public void testCompleteApplicationDeserialization() throws Exception { byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-application.json")); applicationSerializer.fromSlime(SlimeUtils.jsonToSlime(applicationJson)); } }
Definitely. With submissions, we should do all the verification we can before storing the package.
public void testSerialization() { DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("<deployment version='1.0'>" + " <staging/>" + "</deployment>"); ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" + " <allow until='2017-06-15'>deployment-removal</allow>" + "</validation-overrides>"); List<Deployment> deployments = new ArrayList<>(); ApplicationVersion applicationVersion1 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 31); ApplicationVersion applicationVersion2 = ApplicationVersion .from(new SourceRevision("repo1", "branch1", "commit1"), 32, "a@b", Version.fromString("6.3.1"), Instant.ofEpochMilli(496)); Instant activityAt = Instant.parse("2018-06-01T10:15:30.00Z"); deployments.add(new Deployment(zone1, applicationVersion1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3))); deployments.add(new Deployment(zone2, applicationVersion2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5), createClusterUtils(3, 0.2), createClusterInfo(3, 4), new DeploymentMetrics(2, 3, 4, 5, 6, Optional.of(Instant.now().truncatedTo(ChronoUnit.MILLIS))), DeploymentActivity.create(Optional.of(activityAt), Optional.of(activityAt), OptionalDouble.of(200), OptionalDouble.of(10)))); OptionalLong projectId = OptionalLong.of(123L); List<JobStatus> statusList = new ArrayList<>(); statusList.add(JobStatus.initial(JobType.systemTest) .withTriggering(Version.fromString("5.6.7"), ApplicationVersion.unknown, empty(), "Test", Instant.ofEpochMilli(7)) .withCompletion(30, empty(), Instant.ofEpochMilli(8)) .withPause(OptionalLong.of(1L << 32))); statusList.add(JobStatus.initial(JobType.stagingTest) .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, empty(), "Test 2", Instant.ofEpochMilli(5)) .withCompletion(11, Optional.of(JobError.unknown), Instant.ofEpochMilli(6))); statusList.add(JobStatus.initial(JobType.from(main, zone1).get()) .withTriggering(Version.fromString("5.6.6"), 
ApplicationVersion.unknown, deployments.stream().findFirst(), "Test 3", Instant.ofEpochMilli(6)) .withCompletion(11, empty(), Instant.ofEpochMilli(7))); DeploymentJobs deploymentJobs = new DeploymentJobs(projectId, statusList, empty(), true); Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); rotationStatus.put(HostName.from("rot1.fqdn"), RotationStatus.in); rotationStatus.put(HostName.from("rot2.fqdn"), RotationStatus.out); Application original = new Application(ApplicationId.from("t1", "a1", "i1"), Instant.now().truncatedTo(ChronoUnit.MILLIS), deploymentSpec, validationOverrides, deployments, deploymentJobs, Change.of(Version.fromString("6.7")).withPin(), Change.of(ApplicationVersion.from(new SourceRevision("repo", "master", "deadcafe"), 42)), Optional.of(IssueId.from("1234")), Optional.of(User.from("by-username")), OptionalInt.of(7), new MetricsService.ApplicationMetrics(0.5, 0.9), Optional.of("---begin---\nKEY\n---end---"), Optional.of(new RotationId("my-rotation")), rotationStatus); Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original)); assertEquals(original.id(), serialized.id()); assertEquals(original.createdAt(), serialized.createdAt()); assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm()); assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm()); assertEquals(2, serialized.deployments().size()); assertEquals(original.deployments().get(zone1).applicationVersion(), serialized.deployments().get(zone1).applicationVersion()); assertEquals(original.deployments().get(zone2).applicationVersion(), serialized.deployments().get(zone2).applicationVersion()); assertEquals(original.deployments().get(zone1).version(), serialized.deployments().get(zone1).version()); assertEquals(original.deployments().get(zone2).version(), serialized.deployments().get(zone2).version()); assertEquals(original.deployments().get(zone1).at(), 
serialized.deployments().get(zone1).at()); assertEquals(original.deployments().get(zone2).at(), serialized.deployments().get(zone2).at()); assertEquals(original.deployments().get(zone2).activity().lastQueried().get(), serialized.deployments().get(zone2).activity().lastQueried().get()); assertEquals(original.deployments().get(zone2).activity().lastWritten().get(), serialized.deployments().get(zone2).activity().lastWritten().get()); assertEquals(original.deploymentJobs().projectId(), serialized.deploymentJobs().projectId()); assertEquals(original.deploymentJobs().jobStatus().size(), serialized.deploymentJobs().jobStatus().size()); assertEquals( original.deploymentJobs().jobStatus().get(JobType.systemTest), serialized.deploymentJobs().jobStatus().get(JobType.systemTest)); assertEquals( original.deploymentJobs().jobStatus().get(JobType.stagingTest), serialized.deploymentJobs().jobStatus().get(JobType.stagingTest)); assertEquals(original.outstandingChange(), serialized.outstandingChange()); assertEquals(original.ownershipIssueId(), serialized.ownershipIssueId()); assertEquals(original.owner(), serialized.owner()); assertEquals(original.majorVersion(), serialized.majorVersion()); assertEquals(original.change(), serialized.change()); assertEquals(original.pemDeployKey(), serialized.pemDeployKey()); assertEquals(original.rotation().get(), serialized.rotation().get()); assertEquals(original.rotationStatus(), serialized.rotationStatus()); assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size()); assertEquals(3, serialized.deployments().get(zone2).clusterUtils().size()); assertEquals(0.4, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id2")).getCpu(), 0.01); assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getCpu(), 0.01); assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getMemory(), 0.01); assertEquals(3, 
serialized.deployments().get(zone2).clusterInfo().size()); assertEquals(10, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCost()); assertEquals(ClusterSpec.Type.content, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getClusterType()); assertEquals("flavor2", serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavor()); assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getHostnames().size()); assertEquals(2, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCPU(), Double.MIN_VALUE); assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorMem(), Double.MIN_VALUE); assertEquals(50, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorDisk(), Double.MIN_VALUE); assertEquals(original.metrics().queryServiceQuality(), serialized.metrics().queryServiceQuality(), Double.MIN_VALUE); assertEquals(original.metrics().writeServiceQuality(), serialized.metrics().writeServiceQuality(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().queriesPerSecond(), serialized.deployments().get(zone2).metrics().queriesPerSecond(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().writesPerSecond(), serialized.deployments().get(zone2).metrics().writesPerSecond(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().documentCount(), serialized.deployments().get(zone2).metrics().documentCount(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().queryLatencyMillis(), serialized.deployments().get(zone2).metrics().queryLatencyMillis(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().writeLatencyMillis(), serialized.deployments().get(zone2).metrics().writeLatencyMillis(), 
Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().instant(), serialized.deployments().get(zone2).metrics().instant()); { Application original2 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 42))).get(); Application serialized2 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original2)); assertEquals(original2.change(), serialized2.change()); assertEquals(serialized2.change().application().get().source(), original2.change().application().get().source()); Application original3 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized3 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original3)); assertEquals(original3.change(), serialized3.change()); assertEquals(serialized3.change().application().get().source(), original3.change().application().get().source()); Application original4 = writable(original).withChange(Change.empty()).get(); Application serialized4 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original4)); assertEquals(original4.change(), serialized4.change()); Application original5 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized5 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original5)); assertEquals(original5.change(), serialized5.change()); Application original6 = writable(original).withOutstandingChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized6 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original6)); assertEquals(original6.outstandingChange(), serialized6.outstandingChange()); } }
Optional.of("---begin---\nKEY\n---end---"),
public void testSerialization() { DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("<deployment version='1.0'>" + " <staging/>" + "</deployment>"); ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" + " <allow until='2017-06-15'>deployment-removal</allow>" + "</validation-overrides>"); List<Deployment> deployments = new ArrayList<>(); ApplicationVersion applicationVersion1 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 31); ApplicationVersion applicationVersion2 = ApplicationVersion .from(new SourceRevision("repo1", "branch1", "commit1"), 32, "a@b", Version.fromString("6.3.1"), Instant.ofEpochMilli(496)); Instant activityAt = Instant.parse("2018-06-01T10:15:30.00Z"); deployments.add(new Deployment(zone1, applicationVersion1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3))); deployments.add(new Deployment(zone2, applicationVersion2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5), createClusterUtils(3, 0.2), createClusterInfo(3, 4), new DeploymentMetrics(2, 3, 4, 5, 6, Optional.of(Instant.now().truncatedTo(ChronoUnit.MILLIS))), DeploymentActivity.create(Optional.of(activityAt), Optional.of(activityAt), OptionalDouble.of(200), OptionalDouble.of(10)))); OptionalLong projectId = OptionalLong.of(123L); List<JobStatus> statusList = new ArrayList<>(); statusList.add(JobStatus.initial(JobType.systemTest) .withTriggering(Version.fromString("5.6.7"), ApplicationVersion.unknown, empty(), "Test", Instant.ofEpochMilli(7)) .withCompletion(30, empty(), Instant.ofEpochMilli(8)) .withPause(OptionalLong.of(1L << 32))); statusList.add(JobStatus.initial(JobType.stagingTest) .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, empty(), "Test 2", Instant.ofEpochMilli(5)) .withCompletion(11, Optional.of(JobError.unknown), Instant.ofEpochMilli(6))); statusList.add(JobStatus.initial(JobType.from(main, zone1).get()) .withTriggering(Version.fromString("5.6.6"), 
ApplicationVersion.unknown, deployments.stream().findFirst(), "Test 3", Instant.ofEpochMilli(6)) .withCompletion(11, empty(), Instant.ofEpochMilli(7))); DeploymentJobs deploymentJobs = new DeploymentJobs(projectId, statusList, empty(), true); Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); rotationStatus.put(HostName.from("rot1.fqdn"), RotationStatus.in); rotationStatus.put(HostName.from("rot2.fqdn"), RotationStatus.out); Application original = new Application(ApplicationId.from("t1", "a1", "i1"), Instant.now().truncatedTo(ChronoUnit.MILLIS), deploymentSpec, validationOverrides, deployments, deploymentJobs, Change.of(Version.fromString("6.7")).withPin(), Change.of(ApplicationVersion.from(new SourceRevision("repo", "master", "deadcafe"), 42)), Optional.of(IssueId.from("1234")), Optional.of(User.from("by-username")), OptionalInt.of(7), new MetricsService.ApplicationMetrics(0.5, 0.9), Optional.of("---begin---\nKEY\n---end---"), Optional.of(new RotationId("my-rotation")), rotationStatus); Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original)); assertEquals(original.id(), serialized.id()); assertEquals(original.createdAt(), serialized.createdAt()); assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm()); assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm()); assertEquals(2, serialized.deployments().size()); assertEquals(original.deployments().get(zone1).applicationVersion(), serialized.deployments().get(zone1).applicationVersion()); assertEquals(original.deployments().get(zone2).applicationVersion(), serialized.deployments().get(zone2).applicationVersion()); assertEquals(original.deployments().get(zone1).version(), serialized.deployments().get(zone1).version()); assertEquals(original.deployments().get(zone2).version(), serialized.deployments().get(zone2).version()); assertEquals(original.deployments().get(zone1).at(), 
serialized.deployments().get(zone1).at()); assertEquals(original.deployments().get(zone2).at(), serialized.deployments().get(zone2).at()); assertEquals(original.deployments().get(zone2).activity().lastQueried().get(), serialized.deployments().get(zone2).activity().lastQueried().get()); assertEquals(original.deployments().get(zone2).activity().lastWritten().get(), serialized.deployments().get(zone2).activity().lastWritten().get()); assertEquals(original.deploymentJobs().projectId(), serialized.deploymentJobs().projectId()); assertEquals(original.deploymentJobs().jobStatus().size(), serialized.deploymentJobs().jobStatus().size()); assertEquals( original.deploymentJobs().jobStatus().get(JobType.systemTest), serialized.deploymentJobs().jobStatus().get(JobType.systemTest)); assertEquals( original.deploymentJobs().jobStatus().get(JobType.stagingTest), serialized.deploymentJobs().jobStatus().get(JobType.stagingTest)); assertEquals(original.outstandingChange(), serialized.outstandingChange()); assertEquals(original.ownershipIssueId(), serialized.ownershipIssueId()); assertEquals(original.owner(), serialized.owner()); assertEquals(original.majorVersion(), serialized.majorVersion()); assertEquals(original.change(), serialized.change()); assertEquals(original.pemDeployKey(), serialized.pemDeployKey()); assertEquals(original.rotation().get(), serialized.rotation().get()); assertEquals(original.rotationStatus(), serialized.rotationStatus()); assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size()); assertEquals(3, serialized.deployments().get(zone2).clusterUtils().size()); assertEquals(0.4, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id2")).getCpu(), 0.01); assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getCpu(), 0.01); assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getMemory(), 0.01); assertEquals(3, 
serialized.deployments().get(zone2).clusterInfo().size()); assertEquals(10, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCost()); assertEquals(ClusterSpec.Type.content, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getClusterType()); assertEquals("flavor2", serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavor()); assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getHostnames().size()); assertEquals(2, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCPU(), Double.MIN_VALUE); assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorMem(), Double.MIN_VALUE); assertEquals(50, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorDisk(), Double.MIN_VALUE); assertEquals(original.metrics().queryServiceQuality(), serialized.metrics().queryServiceQuality(), Double.MIN_VALUE); assertEquals(original.metrics().writeServiceQuality(), serialized.metrics().writeServiceQuality(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().queriesPerSecond(), serialized.deployments().get(zone2).metrics().queriesPerSecond(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().writesPerSecond(), serialized.deployments().get(zone2).metrics().writesPerSecond(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().documentCount(), serialized.deployments().get(zone2).metrics().documentCount(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().queryLatencyMillis(), serialized.deployments().get(zone2).metrics().queryLatencyMillis(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().writeLatencyMillis(), serialized.deployments().get(zone2).metrics().writeLatencyMillis(), 
Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().instant(), serialized.deployments().get(zone2).metrics().instant()); { Application original2 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 42))).get(); Application serialized2 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original2)); assertEquals(original2.change(), serialized2.change()); assertEquals(serialized2.change().application().get().source(), original2.change().application().get().source()); Application original3 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized3 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original3)); assertEquals(original3.change(), serialized3.change()); assertEquals(serialized3.change().application().get().source(), original3.change().application().get().source()); Application original4 = writable(original).withChange(Change.empty()).get(); Application serialized4 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original4)); assertEquals(original4.change(), serialized4.change()); Application original5 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized5 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original5)); assertEquals(original5.change(), serialized5.change()); Application original6 = writable(original).withOutstandingChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized6 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original6)); assertEquals(original6.outstandingChange(), serialized6.outstandingChange()); } }
class ApplicationSerializerTest { private static final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private static final Path testData = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/"); private static final ZoneId zone1 = ZoneId.from("prod", "us-west-1"); private static final ZoneId zone2 = ZoneId.from("prod", "us-east-3"); @Test private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) { Map<ClusterSpec.Id, ClusterInfo> result = new HashMap<>(); for (int cluster = 0; cluster < clusters; cluster++) { List<String> hostnames = new ArrayList<>(); for (int host = 0; host < hosts; host++) { hostnames.add("hostname" + cluster*host + host); } result.put(ClusterSpec.Id.from("id" + cluster), new ClusterInfo("flavor" + cluster, 10, 2, 4, 50, ClusterSpec.Type.content, hostnames)); } return result; } private Map<ClusterSpec.Id, ClusterUtilization> createClusterUtils(int clusters, double inc) { Map<ClusterSpec.Id, ClusterUtilization> result = new HashMap<>(); ClusterUtilization util = new ClusterUtilization(0,0,0,0); for (int cluster = 0; cluster < clusters; cluster++) { double agg = cluster*inc; result.put(ClusterSpec.Id.from("id" + cluster), new ClusterUtilization( util.getMemory()+ agg, util.getCpu()+ agg, util.getDisk() + agg, util.getDiskBusy() + agg)); } return result; } @Test public void testCompleteApplicationDeserialization() throws Exception { byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-application.json")); applicationSerializer.fromSlime(SlimeUtils.jsonToSlime(applicationJson)); } }
class ApplicationSerializerTest { private static final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private static final Path testData = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/"); private static final ZoneId zone1 = ZoneId.from("prod", "us-west-1"); private static final ZoneId zone2 = ZoneId.from("prod", "us-east-3"); @Test private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) { Map<ClusterSpec.Id, ClusterInfo> result = new HashMap<>(); for (int cluster = 0; cluster < clusters; cluster++) { List<String> hostnames = new ArrayList<>(); for (int host = 0; host < hosts; host++) { hostnames.add("hostname" + cluster*host + host); } result.put(ClusterSpec.Id.from("id" + cluster), new ClusterInfo("flavor" + cluster, 10, 2, 4, 50, ClusterSpec.Type.content, hostnames)); } return result; } private Map<ClusterSpec.Id, ClusterUtilization> createClusterUtils(int clusters, double inc) { Map<ClusterSpec.Id, ClusterUtilization> result = new HashMap<>(); ClusterUtilization util = new ClusterUtilization(0,0,0,0); for (int cluster = 0; cluster < clusters; cluster++) { double agg = cluster*inc; result.put(ClusterSpec.Id.from("id" + cluster), new ClusterUtilization( util.getMemory()+ agg, util.getCpu()+ agg, util.getDisk() + agg, util.getDiskBusy() + agg)); } return result; } @Test public void testCompleteApplicationDeserialization() throws Exception { byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-application.json")); applicationSerializer.fromSlime(SlimeUtils.jsonToSlime(applicationJson)); } }
Upload _is_ submission, btw :)
public void testSerialization() { DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("<deployment version='1.0'>" + " <staging/>" + "</deployment>"); ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" + " <allow until='2017-06-15'>deployment-removal</allow>" + "</validation-overrides>"); List<Deployment> deployments = new ArrayList<>(); ApplicationVersion applicationVersion1 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 31); ApplicationVersion applicationVersion2 = ApplicationVersion .from(new SourceRevision("repo1", "branch1", "commit1"), 32, "a@b", Version.fromString("6.3.1"), Instant.ofEpochMilli(496)); Instant activityAt = Instant.parse("2018-06-01T10:15:30.00Z"); deployments.add(new Deployment(zone1, applicationVersion1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3))); deployments.add(new Deployment(zone2, applicationVersion2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5), createClusterUtils(3, 0.2), createClusterInfo(3, 4), new DeploymentMetrics(2, 3, 4, 5, 6, Optional.of(Instant.now().truncatedTo(ChronoUnit.MILLIS))), DeploymentActivity.create(Optional.of(activityAt), Optional.of(activityAt), OptionalDouble.of(200), OptionalDouble.of(10)))); OptionalLong projectId = OptionalLong.of(123L); List<JobStatus> statusList = new ArrayList<>(); statusList.add(JobStatus.initial(JobType.systemTest) .withTriggering(Version.fromString("5.6.7"), ApplicationVersion.unknown, empty(), "Test", Instant.ofEpochMilli(7)) .withCompletion(30, empty(), Instant.ofEpochMilli(8)) .withPause(OptionalLong.of(1L << 32))); statusList.add(JobStatus.initial(JobType.stagingTest) .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, empty(), "Test 2", Instant.ofEpochMilli(5)) .withCompletion(11, Optional.of(JobError.unknown), Instant.ofEpochMilli(6))); statusList.add(JobStatus.initial(JobType.from(main, zone1).get()) .withTriggering(Version.fromString("5.6.6"), 
ApplicationVersion.unknown, deployments.stream().findFirst(), "Test 3", Instant.ofEpochMilli(6)) .withCompletion(11, empty(), Instant.ofEpochMilli(7))); DeploymentJobs deploymentJobs = new DeploymentJobs(projectId, statusList, empty(), true); Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); rotationStatus.put(HostName.from("rot1.fqdn"), RotationStatus.in); rotationStatus.put(HostName.from("rot2.fqdn"), RotationStatus.out); Application original = new Application(ApplicationId.from("t1", "a1", "i1"), Instant.now().truncatedTo(ChronoUnit.MILLIS), deploymentSpec, validationOverrides, deployments, deploymentJobs, Change.of(Version.fromString("6.7")).withPin(), Change.of(ApplicationVersion.from(new SourceRevision("repo", "master", "deadcafe"), 42)), Optional.of(IssueId.from("1234")), Optional.of(User.from("by-username")), OptionalInt.of(7), new MetricsService.ApplicationMetrics(0.5, 0.9), Optional.of("---begin---\nKEY\n---end---"), Optional.of(new RotationId("my-rotation")), rotationStatus); Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original)); assertEquals(original.id(), serialized.id()); assertEquals(original.createdAt(), serialized.createdAt()); assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm()); assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm()); assertEquals(2, serialized.deployments().size()); assertEquals(original.deployments().get(zone1).applicationVersion(), serialized.deployments().get(zone1).applicationVersion()); assertEquals(original.deployments().get(zone2).applicationVersion(), serialized.deployments().get(zone2).applicationVersion()); assertEquals(original.deployments().get(zone1).version(), serialized.deployments().get(zone1).version()); assertEquals(original.deployments().get(zone2).version(), serialized.deployments().get(zone2).version()); assertEquals(original.deployments().get(zone1).at(), 
serialized.deployments().get(zone1).at()); assertEquals(original.deployments().get(zone2).at(), serialized.deployments().get(zone2).at()); assertEquals(original.deployments().get(zone2).activity().lastQueried().get(), serialized.deployments().get(zone2).activity().lastQueried().get()); assertEquals(original.deployments().get(zone2).activity().lastWritten().get(), serialized.deployments().get(zone2).activity().lastWritten().get()); assertEquals(original.deploymentJobs().projectId(), serialized.deploymentJobs().projectId()); assertEquals(original.deploymentJobs().jobStatus().size(), serialized.deploymentJobs().jobStatus().size()); assertEquals( original.deploymentJobs().jobStatus().get(JobType.systemTest), serialized.deploymentJobs().jobStatus().get(JobType.systemTest)); assertEquals( original.deploymentJobs().jobStatus().get(JobType.stagingTest), serialized.deploymentJobs().jobStatus().get(JobType.stagingTest)); assertEquals(original.outstandingChange(), serialized.outstandingChange()); assertEquals(original.ownershipIssueId(), serialized.ownershipIssueId()); assertEquals(original.owner(), serialized.owner()); assertEquals(original.majorVersion(), serialized.majorVersion()); assertEquals(original.change(), serialized.change()); assertEquals(original.pemDeployKey(), serialized.pemDeployKey()); assertEquals(original.rotation().get(), serialized.rotation().get()); assertEquals(original.rotationStatus(), serialized.rotationStatus()); assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size()); assertEquals(3, serialized.deployments().get(zone2).clusterUtils().size()); assertEquals(0.4, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id2")).getCpu(), 0.01); assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getCpu(), 0.01); assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getMemory(), 0.01); assertEquals(3, 
serialized.deployments().get(zone2).clusterInfo().size()); assertEquals(10, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCost()); assertEquals(ClusterSpec.Type.content, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getClusterType()); assertEquals("flavor2", serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavor()); assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getHostnames().size()); assertEquals(2, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCPU(), Double.MIN_VALUE); assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorMem(), Double.MIN_VALUE); assertEquals(50, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorDisk(), Double.MIN_VALUE); assertEquals(original.metrics().queryServiceQuality(), serialized.metrics().queryServiceQuality(), Double.MIN_VALUE); assertEquals(original.metrics().writeServiceQuality(), serialized.metrics().writeServiceQuality(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().queriesPerSecond(), serialized.deployments().get(zone2).metrics().queriesPerSecond(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().writesPerSecond(), serialized.deployments().get(zone2).metrics().writesPerSecond(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().documentCount(), serialized.deployments().get(zone2).metrics().documentCount(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().queryLatencyMillis(), serialized.deployments().get(zone2).metrics().queryLatencyMillis(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().writeLatencyMillis(), serialized.deployments().get(zone2).metrics().writeLatencyMillis(), 
Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().instant(), serialized.deployments().get(zone2).metrics().instant()); { Application original2 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 42))).get(); Application serialized2 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original2)); assertEquals(original2.change(), serialized2.change()); assertEquals(serialized2.change().application().get().source(), original2.change().application().get().source()); Application original3 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized3 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original3)); assertEquals(original3.change(), serialized3.change()); assertEquals(serialized3.change().application().get().source(), original3.change().application().get().source()); Application original4 = writable(original).withChange(Change.empty()).get(); Application serialized4 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original4)); assertEquals(original4.change(), serialized4.change()); Application original5 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized5 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original5)); assertEquals(original5.change(), serialized5.change()); Application original6 = writable(original).withOutstandingChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized6 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original6)); assertEquals(original6.outstandingChange(), serialized6.outstandingChange()); } }
Optional.of("---begin---\nKEY\n---end---"),
public void testSerialization() { DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("<deployment version='1.0'>" + " <staging/>" + "</deployment>"); ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" + " <allow until='2017-06-15'>deployment-removal</allow>" + "</validation-overrides>"); List<Deployment> deployments = new ArrayList<>(); ApplicationVersion applicationVersion1 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 31); ApplicationVersion applicationVersion2 = ApplicationVersion .from(new SourceRevision("repo1", "branch1", "commit1"), 32, "a@b", Version.fromString("6.3.1"), Instant.ofEpochMilli(496)); Instant activityAt = Instant.parse("2018-06-01T10:15:30.00Z"); deployments.add(new Deployment(zone1, applicationVersion1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3))); deployments.add(new Deployment(zone2, applicationVersion2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5), createClusterUtils(3, 0.2), createClusterInfo(3, 4), new DeploymentMetrics(2, 3, 4, 5, 6, Optional.of(Instant.now().truncatedTo(ChronoUnit.MILLIS))), DeploymentActivity.create(Optional.of(activityAt), Optional.of(activityAt), OptionalDouble.of(200), OptionalDouble.of(10)))); OptionalLong projectId = OptionalLong.of(123L); List<JobStatus> statusList = new ArrayList<>(); statusList.add(JobStatus.initial(JobType.systemTest) .withTriggering(Version.fromString("5.6.7"), ApplicationVersion.unknown, empty(), "Test", Instant.ofEpochMilli(7)) .withCompletion(30, empty(), Instant.ofEpochMilli(8)) .withPause(OptionalLong.of(1L << 32))); statusList.add(JobStatus.initial(JobType.stagingTest) .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, empty(), "Test 2", Instant.ofEpochMilli(5)) .withCompletion(11, Optional.of(JobError.unknown), Instant.ofEpochMilli(6))); statusList.add(JobStatus.initial(JobType.from(main, zone1).get()) .withTriggering(Version.fromString("5.6.6"), 
ApplicationVersion.unknown, deployments.stream().findFirst(), "Test 3", Instant.ofEpochMilli(6)) .withCompletion(11, empty(), Instant.ofEpochMilli(7))); DeploymentJobs deploymentJobs = new DeploymentJobs(projectId, statusList, empty(), true); Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); rotationStatus.put(HostName.from("rot1.fqdn"), RotationStatus.in); rotationStatus.put(HostName.from("rot2.fqdn"), RotationStatus.out); Application original = new Application(ApplicationId.from("t1", "a1", "i1"), Instant.now().truncatedTo(ChronoUnit.MILLIS), deploymentSpec, validationOverrides, deployments, deploymentJobs, Change.of(Version.fromString("6.7")).withPin(), Change.of(ApplicationVersion.from(new SourceRevision("repo", "master", "deadcafe"), 42)), Optional.of(IssueId.from("1234")), Optional.of(User.from("by-username")), OptionalInt.of(7), new MetricsService.ApplicationMetrics(0.5, 0.9), Optional.of("---begin---\nKEY\n---end---"), Optional.of(new RotationId("my-rotation")), rotationStatus); Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original)); assertEquals(original.id(), serialized.id()); assertEquals(original.createdAt(), serialized.createdAt()); assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm()); assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm()); assertEquals(2, serialized.deployments().size()); assertEquals(original.deployments().get(zone1).applicationVersion(), serialized.deployments().get(zone1).applicationVersion()); assertEquals(original.deployments().get(zone2).applicationVersion(), serialized.deployments().get(zone2).applicationVersion()); assertEquals(original.deployments().get(zone1).version(), serialized.deployments().get(zone1).version()); assertEquals(original.deployments().get(zone2).version(), serialized.deployments().get(zone2).version()); assertEquals(original.deployments().get(zone1).at(), 
serialized.deployments().get(zone1).at()); assertEquals(original.deployments().get(zone2).at(), serialized.deployments().get(zone2).at()); assertEquals(original.deployments().get(zone2).activity().lastQueried().get(), serialized.deployments().get(zone2).activity().lastQueried().get()); assertEquals(original.deployments().get(zone2).activity().lastWritten().get(), serialized.deployments().get(zone2).activity().lastWritten().get()); assertEquals(original.deploymentJobs().projectId(), serialized.deploymentJobs().projectId()); assertEquals(original.deploymentJobs().jobStatus().size(), serialized.deploymentJobs().jobStatus().size()); assertEquals( original.deploymentJobs().jobStatus().get(JobType.systemTest), serialized.deploymentJobs().jobStatus().get(JobType.systemTest)); assertEquals( original.deploymentJobs().jobStatus().get(JobType.stagingTest), serialized.deploymentJobs().jobStatus().get(JobType.stagingTest)); assertEquals(original.outstandingChange(), serialized.outstandingChange()); assertEquals(original.ownershipIssueId(), serialized.ownershipIssueId()); assertEquals(original.owner(), serialized.owner()); assertEquals(original.majorVersion(), serialized.majorVersion()); assertEquals(original.change(), serialized.change()); assertEquals(original.pemDeployKey(), serialized.pemDeployKey()); assertEquals(original.rotation().get(), serialized.rotation().get()); assertEquals(original.rotationStatus(), serialized.rotationStatus()); assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size()); assertEquals(3, serialized.deployments().get(zone2).clusterUtils().size()); assertEquals(0.4, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id2")).getCpu(), 0.01); assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getCpu(), 0.01); assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getMemory(), 0.01); assertEquals(3, 
serialized.deployments().get(zone2).clusterInfo().size()); assertEquals(10, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCost()); assertEquals(ClusterSpec.Type.content, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getClusterType()); assertEquals("flavor2", serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavor()); assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getHostnames().size()); assertEquals(2, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCPU(), Double.MIN_VALUE); assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorMem(), Double.MIN_VALUE); assertEquals(50, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorDisk(), Double.MIN_VALUE); assertEquals(original.metrics().queryServiceQuality(), serialized.metrics().queryServiceQuality(), Double.MIN_VALUE); assertEquals(original.metrics().writeServiceQuality(), serialized.metrics().writeServiceQuality(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().queriesPerSecond(), serialized.deployments().get(zone2).metrics().queriesPerSecond(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().writesPerSecond(), serialized.deployments().get(zone2).metrics().writesPerSecond(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().documentCount(), serialized.deployments().get(zone2).metrics().documentCount(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().queryLatencyMillis(), serialized.deployments().get(zone2).metrics().queryLatencyMillis(), Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().writeLatencyMillis(), serialized.deployments().get(zone2).metrics().writeLatencyMillis(), 
Double.MIN_VALUE); assertEquals(original.deployments().get(zone2).metrics().instant(), serialized.deployments().get(zone2).metrics().instant()); { Application original2 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 42))).get(); Application serialized2 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original2)); assertEquals(original2.change(), serialized2.change()); assertEquals(serialized2.change().application().get().source(), original2.change().application().get().source()); Application original3 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized3 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original3)); assertEquals(original3.change(), serialized3.change()); assertEquals(serialized3.change().application().get().source(), original3.change().application().get().source()); Application original4 = writable(original).withChange(Change.empty()).get(); Application serialized4 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original4)); assertEquals(original4.change(), serialized4.change()); Application original5 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized5 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original5)); assertEquals(original5.change(), serialized5.change()); Application original6 = writable(original).withOutstandingChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get(); Application serialized6 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original6)); assertEquals(original6.outstandingChange(), serialized6.outstandingChange()); } }
class ApplicationSerializerTest { private static final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private static final Path testData = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/"); private static final ZoneId zone1 = ZoneId.from("prod", "us-west-1"); private static final ZoneId zone2 = ZoneId.from("prod", "us-east-3"); @Test private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) { Map<ClusterSpec.Id, ClusterInfo> result = new HashMap<>(); for (int cluster = 0; cluster < clusters; cluster++) { List<String> hostnames = new ArrayList<>(); for (int host = 0; host < hosts; host++) { hostnames.add("hostname" + cluster*host + host); } result.put(ClusterSpec.Id.from("id" + cluster), new ClusterInfo("flavor" + cluster, 10, 2, 4, 50, ClusterSpec.Type.content, hostnames)); } return result; } private Map<ClusterSpec.Id, ClusterUtilization> createClusterUtils(int clusters, double inc) { Map<ClusterSpec.Id, ClusterUtilization> result = new HashMap<>(); ClusterUtilization util = new ClusterUtilization(0,0,0,0); for (int cluster = 0; cluster < clusters; cluster++) { double agg = cluster*inc; result.put(ClusterSpec.Id.from("id" + cluster), new ClusterUtilization( util.getMemory()+ agg, util.getCpu()+ agg, util.getDisk() + agg, util.getDiskBusy() + agg)); } return result; } @Test public void testCompleteApplicationDeserialization() throws Exception { byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-application.json")); applicationSerializer.fromSlime(SlimeUtils.jsonToSlime(applicationJson)); } }
class ApplicationSerializerTest { private static final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private static final Path testData = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/"); private static final ZoneId zone1 = ZoneId.from("prod", "us-west-1"); private static final ZoneId zone2 = ZoneId.from("prod", "us-east-3"); @Test private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) { Map<ClusterSpec.Id, ClusterInfo> result = new HashMap<>(); for (int cluster = 0; cluster < clusters; cluster++) { List<String> hostnames = new ArrayList<>(); for (int host = 0; host < hosts; host++) { hostnames.add("hostname" + cluster*host + host); } result.put(ClusterSpec.Id.from("id" + cluster), new ClusterInfo("flavor" + cluster, 10, 2, 4, 50, ClusterSpec.Type.content, hostnames)); } return result; } private Map<ClusterSpec.Id, ClusterUtilization> createClusterUtils(int clusters, double inc) { Map<ClusterSpec.Id, ClusterUtilization> result = new HashMap<>(); ClusterUtilization util = new ClusterUtilization(0,0,0,0); for (int cluster = 0; cluster < clusters; cluster++) { double agg = cluster*inc; result.put(ClusterSpec.Id.from("id" + cluster), new ClusterUtilization( util.getMemory()+ agg, util.getCpu()+ agg, util.getDisk() + agg, util.getDiskBusy() + agg)); } return result; } @Test public void testCompleteApplicationDeserialization() throws Exception { byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-application.json")); applicationSerializer.fromSlime(SlimeUtils.jsonToSlime(applicationJson)); } }
(;¬_¬)
/**
 * Round-trips a fully populated Application through the serializer and verifies
 * every field — deployments, job statuses, rotations, metrics, cluster info and
 * utilization — survives serialization.
 */
// NOTE(review): this method lacked @Test (the annotation was stranded elsewhere in the
// flattened source, orphaned before a private helper); without it the test never runs.
@Test
public void testSerialization() {
    DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("<deployment version='1.0'>" +
                                                           " <staging/>" +
                                                           "</deployment>");
    ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" +
                                                                          " <allow until='2017-06-15'>deployment-removal</allow>" +
                                                                          "</validation-overrides>");

    List<Deployment> deployments = new ArrayList<>();
    ApplicationVersion applicationVersion1 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 31);
    ApplicationVersion applicationVersion2 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 32,
                                                                     "a@b", Version.fromString("6.3.1"), Instant.ofEpochMilli(496));
    Instant activityAt = Instant.parse("2018-06-01T10:15:30.00Z");
    // zone1: minimal deployment; zone2: deployment with cluster utils/info, metrics and activity
    deployments.add(new Deployment(zone1, applicationVersion1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3)));
    deployments.add(new Deployment(zone2, applicationVersion2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5),
                                   createClusterUtils(3, 0.2), createClusterInfo(3, 4),
                                   new DeploymentMetrics(2, 3, 4, 5, 6,
                                                         Optional.of(Instant.now().truncatedTo(ChronoUnit.MILLIS))),
                                   DeploymentActivity.create(Optional.of(activityAt), Optional.of(activityAt),
                                                             OptionalDouble.of(200), OptionalDouble.of(10))));

    OptionalLong projectId = OptionalLong.of(123L);
    List<JobStatus> statusList = new ArrayList<>();

    statusList.add(JobStatus.initial(JobType.systemTest)
                            .withTriggering(Version.fromString("5.6.7"), ApplicationVersion.unknown, empty(), "Test", Instant.ofEpochMilli(7))
                            .withCompletion(30, empty(), Instant.ofEpochMilli(8))
                            // pause value deliberately exceeds Integer.MAX_VALUE to catch 32-bit truncation
                            .withPause(OptionalLong.of(1L << 32)));
    statusList.add(JobStatus.initial(JobType.stagingTest)
                            .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, empty(), "Test 2", Instant.ofEpochMilli(5))
                            .withCompletion(11, Optional.of(JobError.unknown), Instant.ofEpochMilli(6)));
    statusList.add(JobStatus.initial(JobType.from(main, zone1).get())
                            .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown,
                                            deployments.stream().findFirst(), "Test 3", Instant.ofEpochMilli(6))
                            .withCompletion(11, empty(), Instant.ofEpochMilli(7)));

    DeploymentJobs deploymentJobs = new DeploymentJobs(projectId, statusList, empty(), true);

    Map<HostName, RotationStatus> rotationStatus = new TreeMap<>();
    rotationStatus.put(HostName.from("rot1.fqdn"), RotationStatus.in);
    rotationStatus.put(HostName.from("rot2.fqdn"), RotationStatus.out);

    Application original = new Application(ApplicationId.from("t1", "a1", "i1"),
                                           Instant.now().truncatedTo(ChronoUnit.MILLIS),
                                           deploymentSpec,
                                           validationOverrides,
                                           deployments,
                                           deploymentJobs,
                                           Change.of(Version.fromString("6.7")).withPin(),
                                           Change.of(ApplicationVersion.from(new SourceRevision("repo", "master", "deadcafe"), 42)),
                                           Optional.of(IssueId.from("1234")),
                                           Optional.of(User.from("by-username")),
                                           OptionalInt.of(7),
                                           new MetricsService.ApplicationMetrics(0.5, 0.9),
                                           Optional.of("-----BEGIN PUBLIC KEY-----\n∠( ᐛ 」∠)_\n-----END PUBLIC KEY-----"),
                                           Optional.of(new RotationId("my-rotation")),
                                           rotationStatus);

    Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original));

    assertEquals(original.id(), serialized.id());
    assertEquals(original.createdAt(), serialized.createdAt());
    assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm());
    assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm());

    assertEquals(2, serialized.deployments().size());
    assertEquals(original.deployments().get(zone1).applicationVersion(), serialized.deployments().get(zone1).applicationVersion());
    assertEquals(original.deployments().get(zone2).applicationVersion(), serialized.deployments().get(zone2).applicationVersion());
    assertEquals(original.deployments().get(zone1).version(), serialized.deployments().get(zone1).version());
    assertEquals(original.deployments().get(zone2).version(), serialized.deployments().get(zone2).version());
    assertEquals(original.deployments().get(zone1).at(), serialized.deployments().get(zone1).at());
    assertEquals(original.deployments().get(zone2).at(), serialized.deployments().get(zone2).at());
    assertEquals(original.deployments().get(zone2).activity().lastQueried().get(), serialized.deployments().get(zone2).activity().lastQueried().get());
    assertEquals(original.deployments().get(zone2).activity().lastWritten().get(), serialized.deployments().get(zone2).activity().lastWritten().get());

    assertEquals(original.deploymentJobs().projectId(), serialized.deploymentJobs().projectId());
    assertEquals(original.deploymentJobs().jobStatus().size(), serialized.deploymentJobs().jobStatus().size());
    assertEquals(original.deploymentJobs().jobStatus().get(JobType.systemTest),
                 serialized.deploymentJobs().jobStatus().get(JobType.systemTest));
    assertEquals(original.deploymentJobs().jobStatus().get(JobType.stagingTest),
                 serialized.deploymentJobs().jobStatus().get(JobType.stagingTest));

    assertEquals(original.outstandingChange(), serialized.outstandingChange());
    assertEquals(original.ownershipIssueId(), serialized.ownershipIssueId());
    assertEquals(original.owner(), serialized.owner());
    assertEquals(original.majorVersion(), serialized.majorVersion());
    assertEquals(original.change(), serialized.change());
    assertEquals(original.pemDeployKey(), serialized.pemDeployKey());
    assertEquals(original.rotation().get(), serialized.rotation().get());
    assertEquals(original.rotationStatus(), serialized.rotationStatus());

    // zone1 carried no cluster data; zone2 carried 3 clusters (see fixtures above)
    assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size());
    assertEquals(3, serialized.deployments().get(zone2).clusterUtils().size());
    assertEquals(0.4, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id2")).getCpu(), 0.01);
    assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getCpu(), 0.01);
    assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getMemory(), 0.01);

    assertEquals(3, serialized.deployments().get(zone2).clusterInfo().size());
    assertEquals(10, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCost());
    assertEquals(ClusterSpec.Type.content, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getClusterType());
    assertEquals("flavor2", serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavor());
    assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getHostnames().size());
    assertEquals(2, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCPU(), Double.MIN_VALUE);
    assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorMem(), Double.MIN_VALUE);
    assertEquals(50, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorDisk(), Double.MIN_VALUE);

    assertEquals(original.metrics().queryServiceQuality(), serialized.metrics().queryServiceQuality(), Double.MIN_VALUE);
    assertEquals(original.metrics().writeServiceQuality(), serialized.metrics().writeServiceQuality(), Double.MIN_VALUE);

    assertEquals(original.deployments().get(zone2).metrics().queriesPerSecond(), serialized.deployments().get(zone2).metrics().queriesPerSecond(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().writesPerSecond(), serialized.deployments().get(zone2).metrics().writesPerSecond(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().documentCount(), serialized.deployments().get(zone2).metrics().documentCount(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().queryLatencyMillis(), serialized.deployments().get(zone2).metrics().queryLatencyMillis(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().writeLatencyMillis(), serialized.deployments().get(zone2).metrics().writeLatencyMillis(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().instant(), serialized.deployments().get(zone2).metrics().instant());

    { // change / outstandingChange round-trips for various change shapes
        Application original2 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 42))).get();
        Application serialized2 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original2));
        assertEquals(original2.change(), serialized2.change());
        assertEquals(serialized2.change().application().get().source(), original2.change().application().get().source());

        Application original3 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized3 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original3));
        assertEquals(original3.change(), serialized3.change());
        assertEquals(serialized3.change().application().get().source(), original3.change().application().get().source());

        Application original4 = writable(original).withChange(Change.empty()).get();
        Application serialized4 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original4));
        assertEquals(original4.change(), serialized4.change());

        Application original5 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized5 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original5));
        assertEquals(original5.change(), serialized5.change());

        Application original6 = writable(original).withOutstandingChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized6 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original6));
        assertEquals(original6.outstandingChange(), serialized6.outstandingChange());
    }
}
// NOTE(review): stray duplicated fragment of the testSerialization PEM-key argument — extraction
// artifact, not part of any definition; preserved here commented out:
// Optional.of("-----BEGIN PUBLIC KEY-----\n∠( ᐛ 」∠)_\n-----END PUBLIC KEY-----"),
/**
 * Round-trips a fully populated Application through the serializer and verifies
 * every field survives serialization.
 * NOTE(review): this method appears duplicated in this chunk — likely an extraction
 * artifact; a single copy should remain in the real file.
 */
// NOTE(review): @Test was missing on this copy as well (stranded elsewhere in the
// flattened source); without it the test never runs.
@Test
public void testSerialization() {
    DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("<deployment version='1.0'>" +
                                                           " <staging/>" +
                                                           "</deployment>");
    ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" +
                                                                          " <allow until='2017-06-15'>deployment-removal</allow>" +
                                                                          "</validation-overrides>");

    List<Deployment> deployments = new ArrayList<>();
    ApplicationVersion applicationVersion1 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 31);
    ApplicationVersion applicationVersion2 = ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 32,
                                                                     "a@b", Version.fromString("6.3.1"), Instant.ofEpochMilli(496));
    Instant activityAt = Instant.parse("2018-06-01T10:15:30.00Z");
    // zone1: minimal deployment; zone2: deployment with cluster utils/info, metrics and activity
    deployments.add(new Deployment(zone1, applicationVersion1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3)));
    deployments.add(new Deployment(zone2, applicationVersion2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5),
                                   createClusterUtils(3, 0.2), createClusterInfo(3, 4),
                                   new DeploymentMetrics(2, 3, 4, 5, 6,
                                                         Optional.of(Instant.now().truncatedTo(ChronoUnit.MILLIS))),
                                   DeploymentActivity.create(Optional.of(activityAt), Optional.of(activityAt),
                                                             OptionalDouble.of(200), OptionalDouble.of(10))));

    OptionalLong projectId = OptionalLong.of(123L);
    List<JobStatus> statusList = new ArrayList<>();

    statusList.add(JobStatus.initial(JobType.systemTest)
                            .withTriggering(Version.fromString("5.6.7"), ApplicationVersion.unknown, empty(), "Test", Instant.ofEpochMilli(7))
                            .withCompletion(30, empty(), Instant.ofEpochMilli(8))
                            // pause value deliberately exceeds Integer.MAX_VALUE to catch 32-bit truncation
                            .withPause(OptionalLong.of(1L << 32)));
    statusList.add(JobStatus.initial(JobType.stagingTest)
                            .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown, empty(), "Test 2", Instant.ofEpochMilli(5))
                            .withCompletion(11, Optional.of(JobError.unknown), Instant.ofEpochMilli(6)));
    statusList.add(JobStatus.initial(JobType.from(main, zone1).get())
                            .withTriggering(Version.fromString("5.6.6"), ApplicationVersion.unknown,
                                            deployments.stream().findFirst(), "Test 3", Instant.ofEpochMilli(6))
                            .withCompletion(11, empty(), Instant.ofEpochMilli(7)));

    DeploymentJobs deploymentJobs = new DeploymentJobs(projectId, statusList, empty(), true);

    Map<HostName, RotationStatus> rotationStatus = new TreeMap<>();
    rotationStatus.put(HostName.from("rot1.fqdn"), RotationStatus.in);
    rotationStatus.put(HostName.from("rot2.fqdn"), RotationStatus.out);

    Application original = new Application(ApplicationId.from("t1", "a1", "i1"),
                                           Instant.now().truncatedTo(ChronoUnit.MILLIS),
                                           deploymentSpec,
                                           validationOverrides,
                                           deployments,
                                           deploymentJobs,
                                           Change.of(Version.fromString("6.7")).withPin(),
                                           Change.of(ApplicationVersion.from(new SourceRevision("repo", "master", "deadcafe"), 42)),
                                           Optional.of(IssueId.from("1234")),
                                           Optional.of(User.from("by-username")),
                                           OptionalInt.of(7),
                                           new MetricsService.ApplicationMetrics(0.5, 0.9),
                                           Optional.of("-----BEGIN PUBLIC KEY-----\n∠( ᐛ 」∠)_\n-----END PUBLIC KEY-----"),
                                           Optional.of(new RotationId("my-rotation")),
                                           rotationStatus);

    Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original));

    assertEquals(original.id(), serialized.id());
    assertEquals(original.createdAt(), serialized.createdAt());
    assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm());
    assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm());

    assertEquals(2, serialized.deployments().size());
    assertEquals(original.deployments().get(zone1).applicationVersion(), serialized.deployments().get(zone1).applicationVersion());
    assertEquals(original.deployments().get(zone2).applicationVersion(), serialized.deployments().get(zone2).applicationVersion());
    assertEquals(original.deployments().get(zone1).version(), serialized.deployments().get(zone1).version());
    assertEquals(original.deployments().get(zone2).version(), serialized.deployments().get(zone2).version());
    assertEquals(original.deployments().get(zone1).at(), serialized.deployments().get(zone1).at());
    assertEquals(original.deployments().get(zone2).at(), serialized.deployments().get(zone2).at());
    assertEquals(original.deployments().get(zone2).activity().lastQueried().get(), serialized.deployments().get(zone2).activity().lastQueried().get());
    assertEquals(original.deployments().get(zone2).activity().lastWritten().get(), serialized.deployments().get(zone2).activity().lastWritten().get());

    assertEquals(original.deploymentJobs().projectId(), serialized.deploymentJobs().projectId());
    assertEquals(original.deploymentJobs().jobStatus().size(), serialized.deploymentJobs().jobStatus().size());
    assertEquals(original.deploymentJobs().jobStatus().get(JobType.systemTest),
                 serialized.deploymentJobs().jobStatus().get(JobType.systemTest));
    assertEquals(original.deploymentJobs().jobStatus().get(JobType.stagingTest),
                 serialized.deploymentJobs().jobStatus().get(JobType.stagingTest));

    assertEquals(original.outstandingChange(), serialized.outstandingChange());
    assertEquals(original.ownershipIssueId(), serialized.ownershipIssueId());
    assertEquals(original.owner(), serialized.owner());
    assertEquals(original.majorVersion(), serialized.majorVersion());
    assertEquals(original.change(), serialized.change());
    assertEquals(original.pemDeployKey(), serialized.pemDeployKey());
    assertEquals(original.rotation().get(), serialized.rotation().get());
    assertEquals(original.rotationStatus(), serialized.rotationStatus());

    // zone1 carried no cluster data; zone2 carried 3 clusters (see fixtures above)
    assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size());
    assertEquals(3, serialized.deployments().get(zone2).clusterUtils().size());
    assertEquals(0.4, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id2")).getCpu(), 0.01);
    assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getCpu(), 0.01);
    assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getMemory(), 0.01);

    assertEquals(3, serialized.deployments().get(zone2).clusterInfo().size());
    assertEquals(10, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCost());
    assertEquals(ClusterSpec.Type.content, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getClusterType());
    assertEquals("flavor2", serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavor());
    assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getHostnames().size());
    assertEquals(2, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorCPU(), Double.MIN_VALUE);
    assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorMem(), Double.MIN_VALUE);
    assertEquals(50, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavorDisk(), Double.MIN_VALUE);

    assertEquals(original.metrics().queryServiceQuality(), serialized.metrics().queryServiceQuality(), Double.MIN_VALUE);
    assertEquals(original.metrics().writeServiceQuality(), serialized.metrics().writeServiceQuality(), Double.MIN_VALUE);

    assertEquals(original.deployments().get(zone2).metrics().queriesPerSecond(), serialized.deployments().get(zone2).metrics().queriesPerSecond(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().writesPerSecond(), serialized.deployments().get(zone2).metrics().writesPerSecond(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().documentCount(), serialized.deployments().get(zone2).metrics().documentCount(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().queryLatencyMillis(), serialized.deployments().get(zone2).metrics().queryLatencyMillis(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().writeLatencyMillis(), serialized.deployments().get(zone2).metrics().writeLatencyMillis(), Double.MIN_VALUE);
    assertEquals(original.deployments().get(zone2).metrics().instant(), serialized.deployments().get(zone2).metrics().instant());

    { // change / outstandingChange round-trips for various change shapes
        Application original2 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("repo1", "branch1", "commit1"), 42))).get();
        Application serialized2 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original2));
        assertEquals(original2.change(), serialized2.change());
        assertEquals(serialized2.change().application().get().source(), original2.change().application().get().source());

        Application original3 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized3 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original3));
        assertEquals(original3.change(), serialized3.change());
        assertEquals(serialized3.change().application().get().source(), original3.change().application().get().source());

        Application original4 = writable(original).withChange(Change.empty()).get();
        Application serialized4 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original4));
        assertEquals(original4.change(), serialized4.change());

        Application original5 = writable(original).withChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized5 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original5));
        assertEquals(original5.change(), serialized5.change());

        Application original6 = writable(original).withOutstandingChange(Change.of(ApplicationVersion.from(new SourceRevision("a", "b", "c"), 42))).get();
        Application serialized6 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original6));
        assertEquals(original6.outstandingChange(), serialized6.outstandingChange());
    }
}
/**
 * Round-trip and deserialization tests for {@code ApplicationSerializer}.
 * NOTE(review): this class appears duplicated several times in this chunk — likely an
 * extraction artifact; a single copy should remain in the real file.
 */
class ApplicationSerializerTest {

    private static final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
    private static final Path testData = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/");
    private static final ZoneId zone1 = ZoneId.from("prod", "us-west-1");
    private static final ZoneId zone2 = ZoneId.from("prod", "us-east-3");

    // NOTE(review): a stray @Test annotation preceded this private helper in the original.
    // JUnit 4 test methods must be public, void and parameterless, so it could never run as a
    // test; removed here. It likely belonged to a testSerialization() method not present in
    // this copy — confirm against the canonical file.
    /** Builds {@code clusters} ClusterInfo entries, each listing {@code hosts} synthetic hostnames. */
    private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) {
        Map<ClusterSpec.Id, ClusterInfo> result = new HashMap<>();
        for (int cluster = 0; cluster < clusters; cluster++) {
            List<String> hostnames = new ArrayList<>();
            for (int host = 0; host < hosts; host++) {
                hostnames.add("hostname" + cluster * host + host);
            }
            result.put(ClusterSpec.Id.from("id" + cluster),
                       new ClusterInfo("flavor" + cluster, 10, 2, 4, 50, ClusterSpec.Type.content, hostnames));
        }
        return result;
    }

    /** Builds {@code clusters} ClusterUtilization entries whose metrics all equal {@code cluster * inc}. */
    private Map<ClusterSpec.Id, ClusterUtilization> createClusterUtils(int clusters, double inc) {
        Map<ClusterSpec.Id, ClusterUtilization> result = new HashMap<>();
        ClusterUtilization util = new ClusterUtilization(0, 0, 0, 0);
        for (int cluster = 0; cluster < clusters; cluster++) {
            double agg = cluster * inc;
            result.put(ClusterSpec.Id.from("id" + cluster),
                       new ClusterUtilization(util.getMemory() + agg,
                                              util.getCpu() + agg,
                                              util.getDisk() + agg,
                                              util.getDiskBusy() + agg));
        }
        return result;
    }

    /** Deserializing a stored complete application snapshot must not throw. */
    @Test
    public void testCompleteApplicationDeserialization() throws Exception {
        byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-application.json"));
        applicationSerializer.fromSlime(SlimeUtils.jsonToSlime(applicationJson));
    }

}
/**
 * Round-trip and deserialization tests for {@code ApplicationSerializer}.
 * NOTE(review): this class appears duplicated several times in this chunk — likely an
 * extraction artifact; a single copy should remain in the real file.
 */
class ApplicationSerializerTest {

    private static final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
    private static final Path testData = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/");
    private static final ZoneId zone1 = ZoneId.from("prod", "us-west-1");
    private static final ZoneId zone2 = ZoneId.from("prod", "us-east-3");

    // NOTE(review): a stray @Test annotation preceded this private helper in the original.
    // JUnit 4 test methods must be public, void and parameterless, so it could never run as a
    // test; removed here. It likely belonged to a testSerialization() method not present in
    // this copy — confirm against the canonical file.
    /** Builds {@code clusters} ClusterInfo entries, each listing {@code hosts} synthetic hostnames. */
    private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) {
        Map<ClusterSpec.Id, ClusterInfo> result = new HashMap<>();
        for (int cluster = 0; cluster < clusters; cluster++) {
            List<String> hostnames = new ArrayList<>();
            for (int host = 0; host < hosts; host++) {
                hostnames.add("hostname" + cluster * host + host);
            }
            result.put(ClusterSpec.Id.from("id" + cluster),
                       new ClusterInfo("flavor" + cluster, 10, 2, 4, 50, ClusterSpec.Type.content, hostnames));
        }
        return result;
    }

    /** Builds {@code clusters} ClusterUtilization entries whose metrics all equal {@code cluster * inc}. */
    private Map<ClusterSpec.Id, ClusterUtilization> createClusterUtils(int clusters, double inc) {
        Map<ClusterSpec.Id, ClusterUtilization> result = new HashMap<>();
        ClusterUtilization util = new ClusterUtilization(0, 0, 0, 0);
        for (int cluster = 0; cluster < clusters; cluster++) {
            double agg = cluster * inc;
            result.put(ClusterSpec.Id.from("id" + cluster),
                       new ClusterUtilization(util.getMemory() + agg,
                                              util.getCpu() + agg,
                                              util.getDisk() + agg,
                                              util.getDiskBusy() + agg));
        }
        return result;
    }

    /** Deserializing a stored complete application snapshot must not throw. */
    @Test
    public void testCompleteApplicationDeserialization() throws Exception {
        byte[] applicationJson = Files.readAllBytes(testData.resolve("complete-application.json"));
        applicationSerializer.fromSlime(SlimeUtils.jsonToSlime(applicationJson));
    }

}