comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Those aren't separate jobs. The point of this optional is that not all systems have all zones.
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if ( ! zone.isPresent()) continue;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
In that case the deployment is incomplete and this method returns false - so how is it in disagreement?
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if ( ! application.deploying().isPresent()) return true;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
I'd prefer if the order of the versions (change vs deployment) was the same as the order of the revisions, some lines down (deployment vs change).
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if (((VersionChange)change).version().isAfter(deployment.version())) return false;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
Also, the doc should state that the jobs are checked for success on the currently deploying change, or on a later version, if currently upgrading.
/**
 * Returns whether the currently deploying change has reached every production zone in the
 * deployment spec: a matching revision for an application change, or a deployed version no
 * older than the target for an upgrade. Vacuously true when nothing is deploying.
 * Jobs without a zone in this system are ignored, as not all systems have all zones.
 */
private boolean deploymentComplete(LockedApplication application) {
    if ( ! application.deploying().isPresent()) return true;

    Change change = application.deploying().get();
    for (JobType job : order.jobsFrom(application.deploymentSpec())) {
        if ( ! job.isProduction()) continue;

        Optional<ZoneId> zone = job.zone(this.controller.system());
        if ( ! zone.isPresent()) continue; // zone absent from this system — skip

        Deployment deployment = application.deployments().get(zone.get());
        if (deployment == null) return false; // not yet deployed here

        if (change instanceof VersionChange) {
            // Incomplete while any zone still runs a version older than the target
            if (((VersionChange) change).version().isAfter(deployment.version())) return false;
        }
        else {
            Change.ApplicationChange applicationChange = (Change.ApplicationChange) change;
            if ( ! applicationChange.revision().isPresent()) return false; // unknown revision
            if ( ! deployment.revision().equals(applicationChange.revision().get())) return false;
        }
    }
    return true;
}
if (((VersionChange)change).version().isAfter(deployment.version())) return false;
/**
 * Returns whether the currently deploying change has reached every production zone in the
 * deployment spec: a matching revision for an application change, or a deployed version no
 * older than the target for an upgrade. Vacuously true when nothing is deploying.
 * Jobs without a zone in this system are ignored, as not all systems have all zones.
 */
private boolean deploymentComplete(LockedApplication application) {
    if ( ! application.deploying().isPresent()) return true;

    Change change = application.deploying().get();
    for (JobType job : order.jobsFrom(application.deploymentSpec())) {
        if ( ! job.isProduction()) continue;

        Optional<ZoneId> zone = job.zone(this.controller.system());
        if ( ! zone.isPresent()) continue; // zone absent from this system — skip

        Deployment deployment = application.deployments().get(zone.get());
        if (deployment == null) return false; // not yet deployed here

        if (change instanceof VersionChange) {
            // Incomplete while any zone still runs a version older than the target
            if (((VersionChange) change).version().isAfter(deployment.version())) return false;
        }
        else {
            Change.ApplicationChange applicationChange = (Change.ApplicationChange) change;
            if ( ! applicationChange.revision().isPresent()) return false; // unknown revision
            if ( ! applicationChange.revision().get().equals(deployment.revision())) return false;
        }
    }
    return true;
}
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
I guess the success (of the deployment job) isn't even a factor anymore? If a new version/revision has been deployed, and tests have then failed, it will still count as a success here.
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if (((VersionChange)change).version().isAfter(deployment.version())) return false;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
The jobs here come from a valid deployment spec, so all zones listed should exist?
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if ( ! zone.isPresent()) continue;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
No, it returns `true`.
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if ( ! application.deploying().isPresent()) return true;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
Both done
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if (((VersionChange)change).version().isAfter(deployment.version())) return false;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
:)
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if (((VersionChange)change).version().isAfter(deployment.version())) return false;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
Not in CD or whatever. I didn't decide that job.zone(system) should return Optional, but it does ...
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if ( ! zone.isPresent()) continue;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
No but perhaps you mean if it was cancelled and then later we check for another Change which is an upgrade to an earlier version? Then it will return true but then it is also true that the upgrade is complete ...
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if ( ! application.deploying().isPresent()) return true;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
I can't get this to be right: 1. System test succeeds with version _A_. 2. Staging test dies with version _A_, i.e., no report is generated, and the job is left hanging, to be triggered by the ReadyJobTrigger. 3. We now enter this method, looking to see if we should trigger `stagingTest` with the target version _A_, but then `lastSuccessfulIs(A, stagingTest, app)` is `false`, so we return `false`, and don't trigger `stagingTest`, which we should really do. Am I just wrong?
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
return false;
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || 
nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgrading to the current * version is just unnecessary work. 
*/ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, 
jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
This stops retries on production jobs which die _or fail, for the second time,_ after deployment. But it seems `isHanging` covers these ... albeit with a longer delay ... which may become acceptable once this delay is reduced, with an improved `isRunning` :)
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
return false;
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || 
nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. 
*/ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, 
jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
Ah, `isHanging` to the rescue again, huh?
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
return false;
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || 
nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. 
*/ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, 
jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
What if `stagingTest` doesn't die, but simply fails (twice) -- then `isHanging` won't save you, and you'll still return `false`.
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
return false;
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || 
nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. 
*/ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, 
jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
Hmm... 1. Production job `eu-1` deploys version _A_, but fails the verification (aborts or fails for the second time). 2. We enter this method again with `previous` set to `eu-1`, and then pass this `if` clause. 3. The below `is`s don't stop us either, and we end up triggering `eu-2`, even though `eu-1` failed verification. I can't find anything to stop this from happening?
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
return false;
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || 
nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. 
*/ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, 
jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
We really should separate out verification from deployment, as comparing apples one place and oranges another, while making sure the conditions remain the same, is rather straining ...
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision());
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || 
nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. 
*/ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, 
jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
The comment here was hidden because of a new commit, so I'll answer again: No, it returns `true`.
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
if ( ! application.deploying().isPresent()) return true;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
Copying my answer: No but perhaps you mean if it was cancelled and then later we check for another Change which is an upgrade to an earlier version? Then it will return true but then it is also true that the upgrade is complete ...
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
if ( ! application.deploying().isPresent()) return true;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
I see this is a feature in the test scenario below, necessary to avoid deadlocks. Perhaps it would be better to differentiate between `isOnNewerVersion` and `isOnAtLeastVersion`, though?
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
return false;
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || 
nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. 
*/ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, 
jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
I think you are right (about the last thing). Fixed.
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
return false;
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || 
nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. 
*/ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, 
jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
If they fail then won't be on atLeastProductionversion targetVersion, so how do this fail?
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
return false;
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || 
nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. 
*/ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, 
jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
The doc is good enough now, after you changed it.
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if ( ! application.deploying().isPresent()) return true;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
ApplicationApiHandler.java#795. I guess you can avoid it in unit tests, though.
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if ( ! zone.isPresent()) continue;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
If the deployment is successful, but not the rest of the job. We still want to differentiate between the rest of the job failing or not?
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
return false;
private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || 
nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. * In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. 
*/ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; } /** * Find jobs that can and should run but are currently not. 
*/ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, 
jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? "Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. 
Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = 
triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! 
allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. */ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! 
job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
I don't see what you mean. I actually need to handle the Optional. Anyway, this isn't important ...
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! deployment.revision().equals(((Change.ApplicationChange)change).revision().get())) return false; } else { return false; } } return true; }
if ( ! zone.isPresent()) continue;
private boolean deploymentComplete(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; Change change = application.deploying().get(); for (JobType job : order.jobsFrom(application.deploymentSpec())) { if ( ! job.isProduction()) continue; Optional<ZoneId> zone = job.zone(this.controller.system()); if ( ! zone.isPresent()) continue; Deployment deployment = application.deployments().get(zone.get()); if (deployment == null) return false; if (change instanceof VersionChange) { if (((VersionChange)change).version().isAfter(deployment.version())) return false; } else if (((Change.ApplicationChange)change).revision().isPresent()) { if ( ! ((Change.ApplicationChange)change).revision().get().equals(deployment.revision())) return false; } else { return false; } } return true; }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec last were successful on the currently deploying change. */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if ( previous.type().isProduction() && ! 
isOnAtLeastProductionVersion(targetVersion, application, previous.type())) return false; if (next != null && next.type().isTest() && lastSuccessfulIs(targetVersion, next.type(), application)) return false; if (next != null && next.type().isProduction() && isOnAtLeastProductionVersion(targetVersion, application, next.type())) return false; return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if (next == null) return true; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && isOnAtLeastProductionVersion(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the current deployed version in the zone given by the job * is newer or equal to the given version. This may be the case even if the production job * in question failed, if the failure happens after deployment. 
* In that case we should never deploy an earlier version as that may potentially * downgrade production nodes which we are not guaranteed to support, and upgradibng to the current * version is just unnecessary work. */ private boolean isOnAtLeastProductionVersion(Version version, Application application, JobType job) { if ( ! job.isProduction()) return false; Optional<ZoneId> zone = job.zone(controller.system()); if ( ! zone.isPresent()) return false; Deployment existingDeployment = application.deployments().get(zone.get()); if (existingDeployment == null) return false; return existingDeployment.version().isAfter(version) || existingDeployment.version().equals(version); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulStagingRun = status.lastSuccess(); if ( ! lastSuccessfulStagingRun.isPresent()) return false; return lastSuccessfulStagingRun.get().version().equals(version); } }
class DeploymentTrigger { /** The max duration a job may run before we consider it dead/hanging */ private final Duration jobTimeout; private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildSystem buildSystem; private final DeploymentOrder order; public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) { Objects.requireNonNull(controller,"controller cannot be null"); Objects.requireNonNull(curator,"curator cannot be null"); Objects.requireNonNull(clock,"clock cannot be null"); this.controller = controller; this.clock = clock; this.buildSystem = new PolledBuildSystem(controller, curator); this.order = new DeploymentOrder(controller); this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1); } /** Returns the time in the past before which jobs are at this moment considered unresponsive */ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); } public BuildSystem buildSystem() { return buildSystem; } public DeploymentOrder deploymentOrder() { return order; } /** * Called each time a job completes (successfully or not) to cause triggering of one or more follow-up jobs * (which may possibly the same job once over). * * @param report information about the job that just completed */ public void triggerFromCompletion(JobReport report) { applications().lockOrThrow(report.applicationId(), application -> { application = application.withJobCompletion(report, clock.instant(), controller); if (report.success()) { if (report.jobType() == JobType.component) { if (acceptNewRevisionNow(application)) { if ( ! 
( application.deploying().isPresent() && (application.deploying().get() instanceof Change.VersionChange))) application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown())); } else { applications().store(application.withOutstandingChange(true)); return; } } else if (deploymentComplete(application)) { application = application.withDeploying(Optional.empty()); } } if (report.success()) application = trigger(order.nextAfter(report.jobType(), application), application, report.jobType().jobName() + " completed"); else if (retryBecauseOutOfCapacity(application, report.jobType())) application = trigger(report.jobType(), application, true, "Retrying on out of capacity"); else if (retryBecauseNewFailure(application, report.jobType())) application = trigger(report.jobType(), application, false, "Immediate retry on failure"); applications().store(application); }); } /** Returns whether all production zones listed in deployment spec has this change (or a newer version, if upgrade) */ /** * Find jobs that can and should run but are currently not. */ public void triggerReadyJobs() { ApplicationList applications = ApplicationList.from(applications().asList()); applications = applications.notPullRequest(); for (Application application : applications.asList()) applications().lockIfPresent(application.id(), this::triggerReadyJobs); } /** Find the next step to trigger if any, and triggers it */ public void triggerReadyJobs(LockedApplication application) { if ( ! application.deploying().isPresent()) return; List<JobType> jobs = order.jobsFrom(application.deploymentSpec()); if ( ! jobs.isEmpty() && jobs.get(0).equals(JobType.systemTest) ) { JobStatus systemTestStatus = application.deploymentJobs().jobStatus().get(JobType.systemTest); if (application.deploying().get() instanceof Change.VersionChange) { Version target = ((Change.VersionChange) application.deploying().get()).version(); if (systemTestStatus == null || ! 
systemTestStatus.lastTriggered().isPresent() || ! systemTestStatus.isSuccess() || ! systemTestStatus.lastTriggered().get().version().equals(target) || systemTestStatus.isHanging(jobTimeoutLimit())) { application = trigger(JobType.systemTest, application, false, "Upgrade to " + target); controller.applications().store(application); } } else { JobStatus componentStatus = application.deploymentJobs().jobStatus().get(JobType.component); if (componentStatus != null && changesAvailable(application, componentStatus, systemTestStatus)) { application = trigger(JobType.systemTest, application, false, "Available change in component"); controller.applications().store(application); } } } for (JobType jobType : jobs) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null) continue; if (jobStatus.isRunning(jobTimeoutLimit())) continue; List<JobType> nextToTrigger = new ArrayList<>(); for (JobType nextJobType : order.nextAfter(jobType, application)) { JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType); if (changesAvailable(application, jobStatus, nextStatus) || nextStatus.isHanging(jobTimeoutLimit())) nextToTrigger.add(nextJobType); } application = trigger(nextToTrigger, application, "Available change in " + jobType.jobName()); controller.applications().store(application); } } /** * Returns true if the previous job has completed successfully with a revision and/or version which is * newer (different) than the one last completed successfully in next */ private boolean changesAvailable(Application application, JobStatus previous, JobStatus next) { if ( ! application.deploying().isPresent()) return false; if (next == null) return true; Change change = application.deploying().get(); if (change instanceof Change.VersionChange) { Version targetVersion = ((Change.VersionChange)change).version(); if (next.type().isTest()) { if ( ! 
lastSuccessfulIs(targetVersion, previous.type(), application)) return false; if (lastSuccessfulIs(targetVersion, next.type(), application)) return false; } else if (next.type().isProduction()) { if ( ! lastSuccessfulIs(targetVersion, JobType.stagingTest, application)) return false; if (previous.type().isProduction() && ! alreadyDeployed(targetVersion, application, previous.type())) return false; if (alreadyDeployed(targetVersion, application, next.type())) return false; } else throw new IllegalStateException("Unclassified type of next job: " + next); return true; } else { if ( ! previous.lastSuccess().isPresent()) return false; if ( ! next.lastSuccess().isPresent()) return true; return previous.lastSuccess().get().revision().isPresent() && ! previous.lastSuccess().get().revision().equals(next.lastSuccess().get().revision()); } } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already have an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.deploying().get() + " is already in progress"); application = application.withDeploying(Optional.of(change)); if (change instanceof Change.ApplicationChange) application = application.withOutstandingChange(false); application = trigger(JobType.systemTest, application, false, (change instanceof Change.VersionChange ? 
"Upgrading to " + ((Change.VersionChange)change).version() : "Deploying " + change)); applications().store(application); }); } /** * Cancels any ongoing upgrade of the given application * * @param applicationId the application to trigger */ public void cancelChange(ApplicationId applicationId) { applications().lockOrThrow(applicationId, application -> { buildSystem.removeJobs(application.id()); applications().store(application.withDeploying(Optional.empty())); }); } private ApplicationController applications() { return controller.applications(); } /** Retry immediately only if this job just started failing. Otherwise retry periodically */ private boolean retryBecauseNewFailure(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); return (jobStatus != null && jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofSeconds(10)))); } /** Decide whether to retry due to capacity restrictions */ private boolean retryBecauseOutOfCapacity(Application application, JobType jobType) { JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType); if (jobStatus == null || ! jobStatus.jobError().equals(Optional.of(JobError.outOfCapacity))) return false; return jobStatus.firstFailing().get().at().isAfter(clock.instant().minus(Duration.ofMinutes(15))); } /** Returns whether the given job type should be triggered according to deployment spec */ private boolean hasJob(JobType jobType, Application application) { if ( ! 
jobType.isProduction()) return true; return application.deploymentSpec().includes(jobType.environment(), jobType.region(controller.system())); } /** * Trigger a job for an application * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to put the job at the front of the build system queue (or the back) * @param reason describes why the job is triggered * @return the application in the triggered state, which *must* be stored by the caller */ private LockedApplication trigger(JobType jobType, LockedApplication application, boolean first, String reason) { if (jobType.isProduction() && isRunningProductionJob(application)) return application; return triggerAllowParallel(jobType, application, first, false, reason); } private LockedApplication trigger(List<JobType> jobs, LockedApplication application, String reason) { if (jobs.stream().anyMatch(JobType::isProduction) && isRunningProductionJob(application)) return application; for (JobType job : jobs) application = triggerAllowParallel(job, application, false, false, reason); return application; } /** * Trigger a job for an application, if allowed * * @param jobType the type of the job to trigger, or null to trigger nothing * @param application the application to trigger the job for * @param first whether to trigger the job before other jobs * @param force true to disable checks which should normally prevent this triggering from happening * @param reason describes why the job is triggered * @return the application in the triggered state, if actually triggered. This *must* be stored by the caller */ public LockedApplication triggerAllowParallel(JobType jobType, LockedApplication application, boolean first, boolean force, String reason) { if (jobType == null) return application; if ( ! 
application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) { log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType, application, reason)); return application; } if ( ! force && ! allowedTriggering(jobType, application)) return application; log.info(String.format("Triggering %s for %s, %s: %s", jobType, application, application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"), reason)); buildSystem.addJob(application.id(), jobType, first); return application.withJobTriggering(jobType, application.deploying(), clock.instant(), application.deployVersionFor(jobType, controller), application.deployRevisionFor(jobType, controller), reason); } /** Returns true if the given proposed job triggering should be effected */ private boolean allowedTriggering(JobType jobType, LockedApplication application) { if (jobType.isProduction() && application.deploying().isPresent() && application.deploying().get().blockedBy(application.deploymentSpec(), clock.instant())) return false; if (application.deploying().isPresent() && application.deploying().get() instanceof VersionChange && jobType.isProduction() && alreadyDeployed(((VersionChange) application.deploying().get()).version(), application, jobType)) return false; if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) return false; if ( ! hasJob(jobType, application)) return false; if ( ! application.deploymentJobs().projectId().isPresent()) return false; return true; } private boolean isRunningProductionJob(Application application) { return JobList.from(application) .production() .running(jobTimeoutLimit()) .anyMatch(); } /** * Returns whether the currently deployed version in the zone for the given production job is newer * than the given version, in which case we should avoid an unsupported downgrade, or if it is the * same version, and was successfully deployed, in which case it is unnecessary to redeploy it. 
*/ private boolean alreadyDeployed(Version version, Application application, JobType job) { if ( ! job.isProduction()) throw new IllegalArgumentException(job + " is not a production job!"); return lastSuccessfulIs(version, job, application) || job.zone(controller.system()) .map(zone -> application.deployments().get(zone)) .map(deployment -> deployment.version().isAfter(version)) .orElse(false); } private boolean acceptNewRevisionNow(LockedApplication application) { if ( ! application.deploying().isPresent()) return true; if (application.deploying().get() instanceof Change.ApplicationChange) return true; if (application.deploymentJobs().hasFailures()) return true; if (application.isBlocked(clock.instant())) return true; return false; } private boolean lastSuccessfulIs(Version version, JobType jobType, Application application) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return false; Optional<JobStatus.JobRun> lastSuccessfulRun = status.lastSuccess(); if ( ! lastSuccessfulRun.isPresent()) return false; return lastSuccessfulRun.get().version().equals(version); } }
How does this work for potentially large tensors?
/**
 * Rewrites a tensorflow(model-dir [, signature [, output]]) feature reference into the
 * imported model's ranking expression; any other feature is returned unchanged.
 */
private ExpressionNode transformFeature(ReferenceNode feature) {
    try {
        if ( ! feature.getName().equals("tensorflow")) return feature;

        Arguments arguments = feature.getArguments();
        if (arguments.isEmpty())
            throw new IllegalArgumentException("A tensorflow node must take an argument pointing to " +
                                               "the tensorflow model directory under [application]/models");

        // First argument names the model directory to import from
        ImportResult imported = tensorFlowImporter.importModel(asString(arguments.expressions().get(0)));

        // Optional second and third arguments pick a signature and an output; defaults when unique
        ImportResult.Signature signature = chooseOrDefault("signatures", imported.signatures(),
                                                           optionalArgument(1, arguments));
        String outputName = chooseOrDefault("outputs", signature.outputs(), optionalArgument(2, arguments));

        // Make the model's constant tensors available to the expression through the rank profile
        imported.constants().forEach((name, tensor) -> profile.addConstantTensor(name, new TensorValue(tensor)));

        return imported.expressions().get(outputName).getRoot();
    }
    catch (IllegalArgumentException e) {
        // Add the offending feature reference for context while preserving the cause
        throw new IllegalArgumentException("Could not import tensorflow model from " + feature, e);
    }
}
result.constants().forEach((k, v) -> profile.addConstantTensor(k, new TensorValue(v)));
// Converts a tensorflow(model-dir [, signature [, output]]) feature reference into the
// imported model's ranking expression. Features not named "tensorflow" pass through untouched.
// NOTE(review): every constant in the imported model is materialized as a TensorValue on the
// profile below — presumably fine for typical models, but verify memory impact for large tensors.
private ExpressionNode transformFeature(ReferenceNode feature) {
    try {
        // Only rewrite references to the "tensorflow" pseudo-feature
        if ( ! feature.getName().equals("tensorflow")) return feature;
        if (feature.getArguments().isEmpty())
            throw new IllegalArgumentException("A tensorflow node must take an argument pointing to " +
                                               "the tensorflow model directory under [application]/models");
        // First argument: the model directory to import
        ImportResult result = tensorFlowImporter.importModel(asString(feature.getArguments().expressions().get(0)));
        // Optional second/third arguments select a signature and an output; chooseOrDefault
        // falls back to the single entry when none is specified
        ImportResult.Signature signature = chooseOrDefault("signatures", result.signatures(),
                                                           optionalArgument(1, feature.getArguments()));
        String output = chooseOrDefault("outputs", signature.outputs(), optionalArgument(2, feature.getArguments()));
        // Register the model's constant tensors with the rank profile so the expression can use them
        result.constants().forEach((k, v) -> profile.addConstantTensor(k, new TensorValue(v)));
        return result.expressions().get(output).getRoot();
    }
    catch (IllegalArgumentException e) {
        // Rethrow with the feature reference for context, preserving the original cause
        throw new IllegalArgumentException("Could not import tensorflow model from " + feature, e);
    }
}
class TensorFlowFeatureConverter extends ExpressionTransformer { private final TensorFlowImporter tensorFlowImporter = new TensorFlowImporter(); private final RankProfile profile; public TensorFlowFeatureConverter(RankProfile profile) { this.profile = profile; } @Override public ExpressionNode transform(ExpressionNode node) { if (node instanceof ReferenceNode) return transformFeature((ReferenceNode) node); else if (node instanceof CompositeNode) return super.transformChildren((CompositeNode) node); else return node; } /** * Returns the specified, existing map value, or the only map value if no key is specified. * Throws IllegalArgumentException in all other cases. */ private <T> T chooseOrDefault(String valueDescription, Map<String, T> map, Optional<String> key) { if ( ! key.isPresent()) { if (map.size() == 0) throw new IllegalArgumentException("No " + valueDescription + " are present"); if (map.size() > 1) throw new IllegalArgumentException("Model has multiple " + valueDescription + ", but no " + valueDescription + " argument is specified"); return map.values().stream().findFirst().get(); } else { T value = map.get(key.get()); if (value == null) throw new IllegalArgumentException("Model does not have the specified " + valueDescription + " '" + key.get() + "'"); return value; } } private Optional<String> optionalArgument(int argumentIndex, Arguments arguments) { if (argumentIndex >= arguments.expressions().size()) return Optional.empty(); return Optional.of(asString(arguments.expressions().get(argumentIndex))); } private String asString(ExpressionNode node) { if ( ! (node instanceof ConstantNode)) throw new IllegalArgumentException("Expected a constant string as tensorflow argument, but got '" + node); return stripQuotes(((ConstantNode)node).sourceString()); } private String stripQuotes(String s) { if ( ! isQuoteSign(s.codePointAt(0))) return s; if ( ! 
isQuoteSign(s.codePointAt(s.length() - 1 ))) throw new IllegalArgumentException("tensorflow argument [" + s + "] is missing endquote"); return s.substring(1, s.length()-1); } private boolean isQuoteSign(int c) { return c == '\'' || c == '"'; } }
class TensorFlowFeatureConverter extends ExpressionTransformer { private final TensorFlowImporter tensorFlowImporter = new TensorFlowImporter(); private final RankProfile profile; public TensorFlowFeatureConverter(RankProfile profile) { this.profile = profile; } @Override public ExpressionNode transform(ExpressionNode node) { if (node instanceof ReferenceNode) return transformFeature((ReferenceNode) node); else if (node instanceof CompositeNode) return super.transformChildren((CompositeNode) node); else return node; } /** * Returns the specified, existing map value, or the only map value if no key is specified. * Throws IllegalArgumentException in all other cases. */ private <T> T chooseOrDefault(String valueDescription, Map<String, T> map, Optional<String> key) { if ( ! key.isPresent()) { if (map.size() == 0) throw new IllegalArgumentException("No " + valueDescription + " are present"); if (map.size() > 1) throw new IllegalArgumentException("Model has multiple " + valueDescription + ", but no " + valueDescription + " argument is specified"); return map.values().stream().findFirst().get(); } else { T value = map.get(key.get()); if (value == null) throw new IllegalArgumentException("Model does not have the specified " + valueDescription + " '" + key.get() + "'"); return value; } } private Optional<String> optionalArgument(int argumentIndex, Arguments arguments) { if (argumentIndex >= arguments.expressions().size()) return Optional.empty(); return Optional.of(asString(arguments.expressions().get(argumentIndex))); } private String asString(ExpressionNode node) { if ( ! (node instanceof ConstantNode)) throw new IllegalArgumentException("Expected a constant string as tensorflow argument, but got '" + node); return stripQuotes(((ConstantNode)node).sourceString()); } private String stripQuotes(String s) { if ( ! isQuoteSign(s.codePointAt(0))) return s; if ( ! 
isQuoteSign(s.codePointAt(s.length() - 1 ))) throw new IllegalArgumentException("tensorflow argument [" + s + "] is missing endquote"); return s.substring(1, s.length()-1); } private boolean isQuoteSign(int c) { return c == '\'' || c == '"'; } }
We could consider validating that the macros for the placeholders are actually present, or issue a warning if they are not found?
/**
 * Converts a tensorflow(model-dir, [signature], [output]) feature into the ranking expression of
 * the chosen output of the chosen signature of the imported model, and moves the model's constant
 * tensors into the rank profile. Features not named "tensorflow" are returned unchanged.
 *
 * @throws IllegalArgumentException if the arguments or the model content are invalid
 */
private ExpressionNode transformFeature(ReferenceNode feature) {
    try {
        if ( ! feature.getName().equals("tensorflow")) return feature; // not a tensorflow feature: leave as-is

        if (feature.getArguments().isEmpty())
            throw new IllegalArgumentException("A tensorflow node must take an argument pointing to " +
                                               "the tensorflow model directory under [application]/models");

        ImportResult result = tensorFlowImporter.importModel(asString(feature.getArguments().expressions().get(0)));

        // Pick the unique signature/output, or the ones named by the optional 2nd and 3rd arguments
        ImportResult.Signature signature = chooseOrDefault("signatures", result.signatures(),
                                                           optionalArgument(1, feature.getArguments()));
        String output = chooseOrDefault("outputs", signature.outputs(),
                                        optionalArgument(2, feature.getArguments()));

        // Make the model's constants available to the resulting expression
        result.constants().forEach((name, tensor) -> profile.addConstantTensor(name, new TensorValue(tensor)));
        return result.expressions().get(output).getRoot();
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Could not import tensorflow model from " + feature, e);
    }
}
}
/**
 * Converts a tensorflow(model-dir, [signature], [output]) feature into the ranking expression of
 * the chosen output of the chosen signature of the imported model, and moves the model's constant
 * tensors into the rank profile. Features not named "tensorflow" are returned unchanged.
 *
 * @throws IllegalArgumentException if the arguments or the model content are invalid
 */
private ExpressionNode transformFeature(ReferenceNode feature) {
    try {
        if ( ! feature.getName().equals("tensorflow")) return feature; // not a tensorflow feature: leave as-is

        if (feature.getArguments().isEmpty())
            throw new IllegalArgumentException("A tensorflow node must take an argument pointing to " +
                                               "the tensorflow model directory under [application]/models");

        ImportResult result = tensorFlowImporter.importModel(asString(feature.getArguments().expressions().get(0)));

        // Pick the unique signature/output, or the ones named by the optional 2nd and 3rd arguments
        ImportResult.Signature signature = chooseOrDefault("signatures", result.signatures(),
                                                           optionalArgument(1, feature.getArguments()));
        String output = chooseOrDefault("outputs", signature.outputs(),
                                        optionalArgument(2, feature.getArguments()));

        // Make the model's constants available to the resulting expression
        result.constants().forEach((name, tensor) -> profile.addConstantTensor(name, new TensorValue(tensor)));
        return result.expressions().get(output).getRoot();
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Could not import tensorflow model from " + feature, e);
    }
}
class TensorFlowFeatureConverter extends ExpressionTransformer { private final TensorFlowImporter tensorFlowImporter = new TensorFlowImporter(); private final RankProfile profile; public TensorFlowFeatureConverter(RankProfile profile) { this.profile = profile; } @Override public ExpressionNode transform(ExpressionNode node) { if (node instanceof ReferenceNode) return transformFeature((ReferenceNode) node); else if (node instanceof CompositeNode) return super.transformChildren((CompositeNode) node); else return node; } /** * Returns the specified, existing map value, or the only map value if no key is specified. * Throws IllegalArgumentException in all other cases. */ private <T> T chooseOrDefault(String valueDescription, Map<String, T> map, Optional<String> key) { if ( ! key.isPresent()) { if (map.size() == 0) throw new IllegalArgumentException("No " + valueDescription + " are present"); if (map.size() > 1) throw new IllegalArgumentException("Model has multiple " + valueDescription + ", but no " + valueDescription + " argument is specified"); return map.values().stream().findFirst().get(); } else { T value = map.get(key.get()); if (value == null) throw new IllegalArgumentException("Model does not have the specified " + valueDescription + " '" + key.get() + "'"); return value; } } private Optional<String> optionalArgument(int argumentIndex, Arguments arguments) { if (argumentIndex >= arguments.expressions().size()) return Optional.empty(); return Optional.of(asString(arguments.expressions().get(argumentIndex))); } private String asString(ExpressionNode node) { if ( ! (node instanceof ConstantNode)) throw new IllegalArgumentException("Expected a constant string as tensorflow argument, but got '" + node); return stripQuotes(((ConstantNode)node).sourceString()); } private String stripQuotes(String s) { if ( ! isQuoteSign(s.codePointAt(0))) return s; if ( ! 
isQuoteSign(s.codePointAt(s.length() - 1 ))) throw new IllegalArgumentException("tensorflow argument [" + s + "] is missing endquote"); return s.substring(1, s.length()-1); } private boolean isQuoteSign(int c) { return c == '\'' || c == '"'; } }
class TensorFlowFeatureConverter extends ExpressionTransformer { private final TensorFlowImporter tensorFlowImporter = new TensorFlowImporter(); private final RankProfile profile; public TensorFlowFeatureConverter(RankProfile profile) { this.profile = profile; } @Override public ExpressionNode transform(ExpressionNode node) { if (node instanceof ReferenceNode) return transformFeature((ReferenceNode) node); else if (node instanceof CompositeNode) return super.transformChildren((CompositeNode) node); else return node; } /** * Returns the specified, existing map value, or the only map value if no key is specified. * Throws IllegalArgumentException in all other cases. */ private <T> T chooseOrDefault(String valueDescription, Map<String, T> map, Optional<String> key) { if ( ! key.isPresent()) { if (map.size() == 0) throw new IllegalArgumentException("No " + valueDescription + " are present"); if (map.size() > 1) throw new IllegalArgumentException("Model has multiple " + valueDescription + ", but no " + valueDescription + " argument is specified"); return map.values().stream().findFirst().get(); } else { T value = map.get(key.get()); if (value == null) throw new IllegalArgumentException("Model does not have the specified " + valueDescription + " '" + key.get() + "'"); return value; } } private Optional<String> optionalArgument(int argumentIndex, Arguments arguments) { if (argumentIndex >= arguments.expressions().size()) return Optional.empty(); return Optional.of(asString(arguments.expressions().get(argumentIndex))); } private String asString(ExpressionNode node) { if ( ! (node instanceof ConstantNode)) throw new IllegalArgumentException("Expected a constant string as tensorflow argument, but got '" + node); return stripQuotes(((ConstantNode)node).sourceString()); } private String stripQuotes(String s) { if ( ! isQuoteSign(s.codePointAt(0))) return s; if ( ! 
isQuoteSign(s.codePointAt(s.length() - 1 ))) throw new IllegalArgumentException("tensorflow argument [" + s + "] is missing endquote"); return s.substring(1, s.length()-1); } private boolean isQuoteSign(int c) { return c == '\'' || c == '"'; } }
Ok, can't watch that sad-face any more. Done!
/**
 * Returns a URI which points at a badge showing historic status of the given length
 * for the given job type for the given application.
 *
 * The "last completed" run is the newest run if it has ended (or is the only run),
 * and otherwise the run before it; empty when there are no runs at all.
 */
public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
    List<Run> runs = new ArrayList<>(runs(id, type).values());

    Optional<Run> lastCompleted = Optional.empty();
    if ( ! runs.isEmpty()) {
        Run newest = runs.get(runs.size() - 1);
        if (runs.size() == 1 || newest.hasEnded())
            lastCompleted = Optional.of(newest);
        else
            lastCompleted = Optional.of(runs.get(runs.size() - 2));
    }

    int from = Math.max(0, runs.size() - historyLength); // show at most historyLength entries
    return badges.historic(id, lastCompleted, runs.subList(from, runs.size()));
}
Optional<Run> lastCompleted = runs.isEmpty() ? Optional.empty()
/**
 * Returns a URI which points at a badge showing historic status of the given length
 * for the given job type for the given application.
 *
 * The "last completed" run is the newest run if it has ended (or is the only run),
 * and otherwise the run before it; empty when there are no runs at all.
 */
public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
    List<Run> runs = new ArrayList<>(runs(id, type).values());
    // Fix: the previous version only assigned lastCompleted when exactly one run existed,
    // so with two or more runs it dereferenced null in 'lastCompleted.hasEnded()' (NPE),
    // and it never selected the newest run even when that run had already ended.
    Optional<Run> lastCompleted = runs.isEmpty()
            ? Optional.empty()
            : runs.size() == 1 || runs.get(runs.size() - 1).hasEnded()
              ? Optional.of(runs.get(runs.size() - 1))   // newest run counts when it has ended
              : Optional.of(runs.get(runs.size() - 2));  // otherwise fall back to the one before it
    return badges.historic(id, lastCompleted, runs.subList(Math.max(0, runs.size() - historyLength), runs.size()));
}
/**
 * Maintains deployment job runs for applications which are built internally: run history and logs,
 * application/tester packages, status badges, and tester deployments.
 *
 * NOTE(review): mutating operations take a curator lock keyed on (application, job type);
 * confirm CuratorDb's locking semantics before assuming any stronger thread-safety.
 */
class JobController {

    /** The number of historic runs to keep per application and job type */
    private static final int historyLength = 256;

    private final Controller controller;
    private final CuratorDb curator;
    private final BufferedLogStore logs;
    private final TesterCloud cloud;
    private final Badges badges;

    public JobController(Controller controller, RunDataStore runDataStore, TesterCloud testerCloud) {
        this.controller = controller;
        this.curator = controller.curator();
        this.logs = new BufferedLogStore(curator, runDataStore);
        this.cloud = testerCloud;
        this.badges = new Badges(controller.zoneRegistry().badgeUrl());
    }

    public TesterCloud cloud() { return cloud; }

    public int historyLength() { return historyLength; }

    /** Rewrite all job data with the newest format. */
    public void updateStorage() {
        for (ApplicationId id : applications())
            for (JobType type : jobs(id)) {
                // Re-writing the last run under lock migrates it to the current serialization format.
                locked(id, type, runs -> {
                    curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
                });
            }
    }

    /** Returns all entries currently logged for the given run. */
    public Optional<RunLog> details(RunId id) {
        return details(id, -1);
    }

    /** Returns the logged entries for the given run, which are after the given id threshold. */
    public Optional<RunLog> details(RunId id, long after) {
        try (Lock __ = curator.lock(id.application(), id.type())) {
            Run run = runs(id.application(), id.type()).get(id);
            if (run == null) return Optional.empty();
            // Active runs read from the buffered store; finished runs from the archived one.
            return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after))
                                          : logs.readFinished(id, after);
        }
    }

    /** Stores the given log records for the given run and step. */
    public void log(RunId id, Step step, Level level, List<String> messages) {
        locked(id, __ -> {
            List<LogEntry> entries = messages.stream()
                                             .map(message -> new LogEntry(0, controller.clock().millis(), LogEntry.typeOf(level), message))
                                             .collect(toList());
            logs.append(id.application(), id.type(), step, entries);
            return __;
        });
    }

    /** Stores the given log record for the given run and step. */
    public void log(RunId id, Step step, Level level, String message) {
        log(id, step, level, Collections.singletonList(message));
    }

    /** Fetches any new test log entries, and records the id of the last of these, for continuation. */
    public void updateTestLog(RunId id) {
        locked(id, run -> {
            if ( ! run.readySteps().contains(endTests)) return run;
            Optional<URI> testerEndpoint = testerEndpoint(id);
            if ( ! testerEndpoint.isPresent()) return run;
            List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
            if (entries.isEmpty()) return run;
            logs.append(id.application(), id.type(), endTests, entries);
            // Remember the highest entry id seen, so the next fetch continues from there.
            return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
        });
    }

    /** Returns a list of all application which have registered. */
    public List<ApplicationId> applications() {
        return copyOf(controller.applications().asList().stream()
                                .filter(application -> application.deploymentJobs().deployedInternally())
                                .map(Application::id)
                                .iterator());
    }

    /** Returns all job types which have been run for the given application. */
    public List<JobType> jobs(ApplicationId id) {
        return copyOf(Stream.of(JobType.values())
                            .filter(type -> last(id, type).isPresent())
                            .iterator());
    }

    /** Returns an immutable map of all known runs for the given application and job type. */
    public Map<RunId, Run> runs(ApplicationId id, JobType type) {
        SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
        last(id, type).ifPresent(run -> runs.putIfAbsent(run.id(), run));
        return ImmutableMap.copyOf(runs);
    }

    /** Returns the run with the given id, if it exists. */
    public Optional<Run> run(RunId id) {
        return runs(id.application(), id.type()).values().stream()
                                                .filter(run -> run.id().equals(id))
                                                .findAny();
    }

    /** Returns the last run of the given type, for the given application, if one has been run. */
    public Optional<Run> last(ApplicationId id, JobType type) {
        return curator.readLastRun(id, type);
    }

    /** Returns the run with the given id, provided it is still active. */
    public Optional<Run> active(RunId id) {
        return last(id.application(), id.type())
                .filter(run -> ! run.hasEnded())
                .filter(run -> run.id().equals(id));
    }

    /** Returns a list of all active runs. */
    public List<Run> active() {
        return copyOf(applications().stream()
                                    .flatMap(id -> Stream.of(JobType.values())
                                                         .map(type -> last(id, type))
                                                         .filter(Optional::isPresent).map(Optional::get)
                                                         .filter(run -> ! run.hasEnded()))
                                    .iterator());
    }

    /** Changes the status of the given step, for the given run, provided it is still active. */
    public void update(RunId id, RunStatus status, LockedStep step) {
        locked(id, run -> run.with(status, step));
    }

    /** Changes the status of the given run to inactive, and stores it as a historic run. */
    public void finish(RunId id) {
        locked(id, run -> {
            Run finishedRun = run.finished(controller.clock().instant());
            locked(id.application(), id.type(), runs -> {
                runs.put(run.id(), finishedRun);
                long last = id.number();
                Iterator<RunId> ids = runs.keySet().iterator();
                // Prune runs (and their logs) which have fallen out of the history window.
                for (RunId old = ids.next(); old.number() <= last - historyLength; old = ids.next()) {
                    logs.delete(old);
                    ids.remove();
                }
            });
            logs.flush(id);
            return finishedRun;
        });
    }

    /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
    public void abort(RunId id) {
        locked(id, run -> run.aborted());
    }

    /** Accepts and stores a new application package and test jar pair under a generated application version key. */
    public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId, byte[] packageBytes, byte[] testPackageBytes) {
        AtomicReference<ApplicationVersion> version = new AtomicReference<>();
        controller.applications().lockOrThrow(id, application -> {
            if ( ! application.get().deploymentJobs().deployedInternally()) {
                // First internal submission: copy each currently deployed package into the internal store.
                application.get().deployments().values().stream()
                           .map(Deployment::applicationVersion)
                           .distinct()
                           .forEach(appVersion -> {
                               byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
                               controller.applications().applicationStore().put(application.get().id(), appVersion, content);
                           });
            }
            long run = nextBuild(id);
            version.set(ApplicationVersion.from(revision, run, authorEmail));
            controller.applications().applicationStore().put(id, version.get(), packageBytes);
            controller.applications().applicationStore().put(TesterId.of(id), version.get(), testPackageBytes);
            prunePackages(id);
            controller.applications().storeWithUpdatedConfig(application.withBuiltInternally(true), new ApplicationPackage(packageBytes));
            controller.applications().deploymentTrigger().notifyOfCompletion(DeploymentJobs.JobReport.ofSubmission(id, projectId, version.get()));
        });
        return version.get();
    }

    /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
    public void start(ApplicationId id, JobType type, Versions versions) {
        controller.applications().lockIfPresent(id, application -> {
            if ( ! application.get().deploymentJobs().deployedInternally())
                throw new IllegalArgumentException(id + " is not built here!");
            locked(id, type, __ -> {
                Optional<Run> last = last(id, type);
                if (last.flatMap(run -> active(run.id())).isPresent())
                    throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
                RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
                curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
            });
        });
    }

    /** Unregisters the given application and makes all associated data eligible for garbage collection. */
    public void unregister(ApplicationId id) {
        controller.applications().lockIfPresent(id, application -> {
            controller.applications().store(application.withBuiltInternally(false));
            jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
        });
    }

    /** Deletes run data, packages and tester deployments for applications which are unknown, or no longer built internally. */
    public void collectGarbage() {
        Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
        curator.applicationsWithJobs().stream()
               .filter(id -> ! applicationsToBuild.contains(id))
               .forEach(id -> {
                   try {
                       TesterId tester = TesterId.of(id);
                       for (JobType type : jobs(id))
                           locked(id, type, deactivateTester, __ -> {
                               try (Lock ___ = curator.lock(id, type)) {
                                   deactivateTester(tester, type);
                                   curator.deleteRunData(id, type);
                                   logs.delete(id);
                               }
                           });
                   }
                   catch (TimeoutException e) {
                       return; // Don't delete the remaining data if we couldn't deactivate every tester.
                   }
                   curator.deleteRunData(id);
               });
    }

    public void deactivateTester(TesterId id, JobType type) {
        try {
            controller.configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
        }
        catch (NoInstanceException ignored) {
            // Nothing to deactivate; fine.
        }
    }

    /** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */
    /** Returns a URI which points at a badge showing current status for all jobs for the given application. */
    public URI overviewBadge(ApplicationId id) {
        DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
        return badges.overview(id,
                               steps.jobs().stream()
                                    .map(type -> last(id, type))
                                    .filter(Optional::isPresent).map(Optional::get)
                                    .collect(toList()));
    }

    /** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
    Optional<URI> testerEndpoint(RunId id) {
        ApplicationId tester = id.tester().id();
        return controller.applications().getDeploymentEndpoints(new DeploymentId(tester, id.type().zone(controller.system())))
                         .flatMap(uris -> uris.stream().findAny());
    }

    // Next build number: one past the last completed component build, or 1 if none.
    private long nextBuild(ApplicationId id) {
        return 1 + controller.applications().require(id).deploymentJobs()
                             .statusOf(JobType.component)
                             .flatMap(JobStatus::lastCompleted)
                             .map(JobStatus.JobRun::id)
                             .orElse(0L);
    }

    // Removes stored packages older than the oldest package still deployed in production.
    private void prunePackages(ApplicationId id) {
        controller.applications().lockIfPresent(id, application -> {
            application.get().productionDeployments().values().stream()
                       .map(Deployment::applicationVersion)
                       .min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
                       .ifPresent(oldestDeployed -> {
                           controller.applications().applicationStore().prune(id, oldestDeployed);
                           controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
                       });
        });
    }

    /** Locks and modifies the list of historic runs for the given application and job type. */
    private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
        try (Lock __ = curator.lock(id, type)) {
            SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
            modifications.accept(runs);
            curator.writeHistoricRuns(id, type, runs.values());
        }
    }

    /** Locks and modifies the run with the given id, provided it is still active. */
    private void locked(RunId id, UnaryOperator<Run> modifications) {
        try (Lock __ = curator.lock(id.application(), id.type())) {
            active(id).ifPresent(run -> {
                run = modifications.apply(run);
                curator.writeLastRun(run);
            });
        }
    }

    /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
    public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
        try (Lock lock = curator.lock(id, type, step)) {
            for (Step prerequisite : step.prerequisites()) // Acquiring each prerequisite lock proves it is not running.
                try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
            action.accept(new LockedStep(lock, step));
        }
    }

}
/**
 * Maintains deployment job runs for applications which are built internally: run history and logs,
 * application/tester packages, status badges, and tester deployments.
 *
 * NOTE(review): mutating operations take a curator lock keyed on (application, job type);
 * confirm CuratorDb's locking semantics before assuming any stronger thread-safety.
 */
class JobController {

    /** The number of historic runs to keep per application and job type */
    private static final int historyLength = 256;

    private final Controller controller;
    private final CuratorDb curator;
    private final BufferedLogStore logs;
    private final TesterCloud cloud;
    private final Badges badges;

    public JobController(Controller controller, RunDataStore runDataStore, TesterCloud testerCloud) {
        this.controller = controller;
        this.curator = controller.curator();
        this.logs = new BufferedLogStore(curator, runDataStore);
        this.cloud = testerCloud;
        this.badges = new Badges(controller.zoneRegistry().badgeUrl());
    }

    public TesterCloud cloud() { return cloud; }

    public int historyLength() { return historyLength; }

    /** Rewrite all job data with the newest format. */
    public void updateStorage() {
        for (ApplicationId id : applications())
            for (JobType type : jobs(id)) {
                // Re-writing the last run under lock migrates it to the current serialization format.
                locked(id, type, runs -> {
                    curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
                });
            }
    }

    /** Returns all entries currently logged for the given run. */
    public Optional<RunLog> details(RunId id) {
        return details(id, -1);
    }

    /** Returns the logged entries for the given run, which are after the given id threshold. */
    public Optional<RunLog> details(RunId id, long after) {
        try (Lock __ = curator.lock(id.application(), id.type())) {
            Run run = runs(id.application(), id.type()).get(id);
            if (run == null) return Optional.empty();
            // Active runs read from the buffered store; finished runs from the archived one.
            return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after))
                                          : logs.readFinished(id, after);
        }
    }

    /** Stores the given log records for the given run and step. */
    public void log(RunId id, Step step, Level level, List<String> messages) {
        locked(id, __ -> {
            List<LogEntry> entries = messages.stream()
                                             .map(message -> new LogEntry(0, controller.clock().millis(), LogEntry.typeOf(level), message))
                                             .collect(toList());
            logs.append(id.application(), id.type(), step, entries);
            return __;
        });
    }

    /** Stores the given log record for the given run and step. */
    public void log(RunId id, Step step, Level level, String message) {
        log(id, step, level, Collections.singletonList(message));
    }

    /** Fetches any new test log entries, and records the id of the last of these, for continuation. */
    public void updateTestLog(RunId id) {
        locked(id, run -> {
            if ( ! run.readySteps().contains(endTests)) return run;
            Optional<URI> testerEndpoint = testerEndpoint(id);
            if ( ! testerEndpoint.isPresent()) return run;
            List<LogEntry> entries = cloud.getLog(testerEndpoint.get(), run.lastTestLogEntry());
            if (entries.isEmpty()) return run;
            logs.append(id.application(), id.type(), endTests, entries);
            // Remember the highest entry id seen, so the next fetch continues from there.
            return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
        });
    }

    /** Returns a list of all application which have registered. */
    public List<ApplicationId> applications() {
        return copyOf(controller.applications().asList().stream()
                                .filter(application -> application.deploymentJobs().deployedInternally())
                                .map(Application::id)
                                .iterator());
    }

    /** Returns all job types which have been run for the given application. */
    public List<JobType> jobs(ApplicationId id) {
        return copyOf(Stream.of(JobType.values())
                            .filter(type -> last(id, type).isPresent())
                            .iterator());
    }

    /** Returns an immutable map of all known runs for the given application and job type. */
    public Map<RunId, Run> runs(ApplicationId id, JobType type) {
        SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
        last(id, type).ifPresent(run -> runs.putIfAbsent(run.id(), run));
        return ImmutableMap.copyOf(runs);
    }

    /** Returns the run with the given id, if it exists. */
    public Optional<Run> run(RunId id) {
        return runs(id.application(), id.type()).values().stream()
                                                .filter(run -> run.id().equals(id))
                                                .findAny();
    }

    /** Returns the last run of the given type, for the given application, if one has been run. */
    public Optional<Run> last(ApplicationId id, JobType type) {
        return curator.readLastRun(id, type);
    }

    /** Returns the run with the given id, provided it is still active. */
    public Optional<Run> active(RunId id) {
        return last(id.application(), id.type())
                .filter(run -> ! run.hasEnded())
                .filter(run -> run.id().equals(id));
    }

    /** Returns a list of all active runs. */
    public List<Run> active() {
        return copyOf(applications().stream()
                                    .flatMap(id -> Stream.of(JobType.values())
                                                         .map(type -> last(id, type))
                                                         .filter(Optional::isPresent).map(Optional::get)
                                                         .filter(run -> ! run.hasEnded()))
                                    .iterator());
    }

    /** Changes the status of the given step, for the given run, provided it is still active. */
    public void update(RunId id, RunStatus status, LockedStep step) {
        locked(id, run -> run.with(status, step));
    }

    /** Changes the status of the given run to inactive, and stores it as a historic run. */
    public void finish(RunId id) {
        locked(id, run -> {
            Run finishedRun = run.finished(controller.clock().instant());
            locked(id.application(), id.type(), runs -> {
                runs.put(run.id(), finishedRun);
                long last = id.number();
                Iterator<RunId> ids = runs.keySet().iterator();
                // Prune runs (and their logs) which have fallen out of the history window.
                for (RunId old = ids.next(); old.number() <= last - historyLength; old = ids.next()) {
                    logs.delete(old);
                    ids.remove();
                }
            });
            logs.flush(id);
            return finishedRun;
        });
    }

    /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
    public void abort(RunId id) {
        locked(id, run -> run.aborted());
    }

    /** Accepts and stores a new application package and test jar pair under a generated application version key. */
    public ApplicationVersion submit(ApplicationId id, SourceRevision revision, String authorEmail, long projectId, byte[] packageBytes, byte[] testPackageBytes) {
        AtomicReference<ApplicationVersion> version = new AtomicReference<>();
        controller.applications().lockOrThrow(id, application -> {
            if ( ! application.get().deploymentJobs().deployedInternally()) {
                // First internal submission: copy each currently deployed package into the internal store.
                application.get().deployments().values().stream()
                           .map(Deployment::applicationVersion)
                           .distinct()
                           .forEach(appVersion -> {
                               byte[] content = controller.applications().artifacts().getApplicationPackage(application.get().id(), appVersion.id());
                               controller.applications().applicationStore().put(application.get().id(), appVersion, content);
                           });
            }
            long run = nextBuild(id);
            version.set(ApplicationVersion.from(revision, run, authorEmail));
            controller.applications().applicationStore().put(id, version.get(), packageBytes);
            controller.applications().applicationStore().put(TesterId.of(id), version.get(), testPackageBytes);
            prunePackages(id);
            controller.applications().storeWithUpdatedConfig(application.withBuiltInternally(true), new ApplicationPackage(packageBytes));
            controller.applications().deploymentTrigger().notifyOfCompletion(DeploymentJobs.JobReport.ofSubmission(id, projectId, version.get()));
        });
        return version.get();
    }

    /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
    public void start(ApplicationId id, JobType type, Versions versions) {
        controller.applications().lockIfPresent(id, application -> {
            if ( ! application.get().deploymentJobs().deployedInternally())
                throw new IllegalArgumentException(id + " is not built here!");
            locked(id, type, __ -> {
                Optional<Run> last = last(id, type);
                if (last.flatMap(run -> active(run.id())).isPresent())
                    throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!");
                RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
                curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant()));
            });
        });
    }

    /** Unregisters the given application and makes all associated data eligible for garbage collection. */
    public void unregister(ApplicationId id) {
        controller.applications().lockIfPresent(id, application -> {
            controller.applications().store(application.withBuiltInternally(false));
            jobs(id).forEach(type -> last(id, type).ifPresent(last -> abort(last.id())));
        });
    }

    /** Deletes run data, packages and tester deployments for applications which are unknown, or no longer built internally. */
    public void collectGarbage() {
        Set<ApplicationId> applicationsToBuild = new HashSet<>(applications());
        curator.applicationsWithJobs().stream()
               .filter(id -> ! applicationsToBuild.contains(id))
               .forEach(id -> {
                   try {
                       TesterId tester = TesterId.of(id);
                       for (JobType type : jobs(id))
                           locked(id, type, deactivateTester, __ -> {
                               try (Lock ___ = curator.lock(id, type)) {
                                   deactivateTester(tester, type);
                                   curator.deleteRunData(id, type);
                                   logs.delete(id);
                               }
                           });
                   }
                   catch (TimeoutException e) {
                       return; // Don't delete the remaining data if we couldn't deactivate every tester.
                   }
                   curator.deleteRunData(id);
               });
    }

    public void deactivateTester(TesterId id, JobType type) {
        try {
            controller.configServer().deactivate(new DeploymentId(id.id(), type.zone(controller.system())));
        }
        catch (NoInstanceException ignored) {
            // Nothing to deactivate; fine.
        }
    }

    /** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */
    /** Returns a URI which points at a badge showing current status for all jobs for the given application. */
    public URI overviewBadge(ApplicationId id) {
        DeploymentSteps steps = new DeploymentSteps(controller.applications().require(id).deploymentSpec(), controller::system);
        return badges.overview(id,
                               steps.jobs().stream()
                                    .map(type -> last(id, type))
                                    .filter(Optional::isPresent).map(Optional::get)
                                    .collect(toList()));
    }

    /** Returns a URI of the tester endpoint retrieved from the routing generator, provided it matches an expected form. */
    Optional<URI> testerEndpoint(RunId id) {
        ApplicationId tester = id.tester().id();
        return controller.applications().getDeploymentEndpoints(new DeploymentId(tester, id.type().zone(controller.system())))
                         .flatMap(uris -> uris.stream().findAny());
    }

    // Next build number: one past the last completed component build, or 1 if none.
    private long nextBuild(ApplicationId id) {
        return 1 + controller.applications().require(id).deploymentJobs()
                             .statusOf(JobType.component)
                             .flatMap(JobStatus::lastCompleted)
                             .map(JobStatus.JobRun::id)
                             .orElse(0L);
    }

    // Removes stored packages older than the oldest package still deployed in production.
    private void prunePackages(ApplicationId id) {
        controller.applications().lockIfPresent(id, application -> {
            application.get().productionDeployments().values().stream()
                       .map(Deployment::applicationVersion)
                       .min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
                       .ifPresent(oldestDeployed -> {
                           controller.applications().applicationStore().prune(id, oldestDeployed);
                           controller.applications().applicationStore().prune(TesterId.of(id), oldestDeployed);
                       });
        });
    }

    /** Locks and modifies the list of historic runs for the given application and job type. */
    private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
        try (Lock __ = curator.lock(id, type)) {
            SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
            modifications.accept(runs);
            curator.writeHistoricRuns(id, type, runs.values());
        }
    }

    /** Locks and modifies the run with the given id, provided it is still active. */
    private void locked(RunId id, UnaryOperator<Run> modifications) {
        try (Lock __ = curator.lock(id.application(), id.type())) {
            active(id).ifPresent(run -> {
                run = modifications.apply(run);
                curator.writeLastRun(run);
            });
        }
    }

    /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
    public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
        try (Lock lock = curator.lock(id, type, step)) {
            for (Step prerequisite : step.prerequisites()) // Acquiring each prerequisite lock proves it is not running.
                try (Lock __ = curator.lock(id, type, prerequisite)) { ; }
            action.accept(new LockedStep(lock, step));
        }
    }

}
Yes, but I think we already do validate that during deployment.
/** Replaces a tensorflow(model-dir[, signature[, output]]) feature by the expression of the imported model. */
private ExpressionNode transformFeature(ReferenceNode feature) {
    try {
        if ( ! feature.getName().equals("tensorflow")) return feature;

        Arguments arguments = feature.getArguments();
        if (arguments.isEmpty())
            throw new IllegalArgumentException("A tensorflow node must take an argument pointing to " +
                                               "the tensorflow model directory under [application]/models");

        // Import the model, then resolve which signature and output to use from the optional arguments.
        ImportResult imported = tensorFlowImporter.importModel(asString(arguments.expressions().get(0)));
        ImportResult.Signature signature = chooseOrDefault("signatures", imported.signatures(),
                                                           optionalArgument(1, arguments));
        String outputName = chooseOrDefault("outputs", signature.outputs(),
                                            optionalArgument(2, arguments));

        // Make the model's constant tensors available to the rank profile before handing back its expression.
        imported.constants().forEach((name, tensor) -> profile.addConstantTensor(name, new TensorValue(tensor)));
        return imported.expressions().get(outputName).getRoot();
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Could not import tensorflow model from " + feature, e);
    }
}
}
/** Replaces a tensorflow(model-dir[, signature[, output]]) feature by the expression of the imported model. */
private ExpressionNode transformFeature(ReferenceNode feature) {
    try {
        if ( ! feature.getName().equals("tensorflow")) return feature;

        Arguments arguments = feature.getArguments();
        if (arguments.isEmpty())
            throw new IllegalArgumentException("A tensorflow node must take an argument pointing to " +
                                               "the tensorflow model directory under [application]/models");

        // Import the model, then resolve which signature and output to use from the optional arguments.
        ImportResult imported = tensorFlowImporter.importModel(asString(arguments.expressions().get(0)));
        ImportResult.Signature signature = chooseOrDefault("signatures", imported.signatures(),
                                                           optionalArgument(1, arguments));
        String outputName = chooseOrDefault("outputs", signature.outputs(),
                                            optionalArgument(2, arguments));

        // Make the model's constant tensors available to the rank profile before handing back its expression.
        imported.constants().forEach((name, tensor) -> profile.addConstantTensor(name, new TensorValue(tensor)));
        return imported.expressions().get(outputName).getRoot();
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Could not import tensorflow model from " + feature, e);
    }
}
/**
 * Rewrites "tensorflow" feature references in ranking expressions to the expressions of the
 * imported TensorFlow models they point to, registering the models' constants on the rank profile.
 */
class TensorFlowFeatureConverter extends ExpressionTransformer {

    private final TensorFlowImporter tensorFlowImporter = new TensorFlowImporter();

    // The rank profile which receives the constant tensors of imported models.
    private final RankProfile profile;

    public TensorFlowFeatureConverter(RankProfile profile) {
        this.profile = profile;
    }

    @Override
    public ExpressionNode transform(ExpressionNode node) {
        if (node instanceof ReferenceNode)
            return transformFeature((ReferenceNode) node);
        else if (node instanceof CompositeNode)
            return super.transformChildren((CompositeNode) node);
        else
            return node;
    }

    /**
     * Returns the specified, existing map value, or the only map value if no key is specified.
     * Throws IllegalArgumentException in all other cases.
     */
    private <T> T chooseOrDefault(String valueDescription, Map<String, T> map, Optional<String> key) {
        if ( ! key.isPresent()) {
            if (map.size() == 0)
                throw new IllegalArgumentException("No " + valueDescription + " are present");
            if (map.size() > 1)
                throw new IllegalArgumentException("Model has multiple " + valueDescription + ", but no " +
                                                   valueDescription + " argument is specified");
            return map.values().stream().findFirst().get();
        }
        else {
            T value = map.get(key.get());
            if (value == null)
                throw new IllegalArgumentException("Model does not have the specified " + valueDescription +
                                                   " '" + key.get() + "'");
            return value;
        }
    }

    /** Returns the argument at the given index as a string, or empty if there are fewer arguments than that. */
    private Optional<String> optionalArgument(int argumentIndex, Arguments arguments) {
        if (argumentIndex >= arguments.expressions().size())
            return Optional.empty();
        return Optional.of(asString(arguments.expressions().get(argumentIndex)));
    }

    /** Returns the given constant node's source string, with any surrounding quotes stripped. */
    private String asString(ExpressionNode node) {
        if ( ! (node instanceof ConstantNode))
            throw new IllegalArgumentException("Expected a constant string as tensorflow argument, but got '" + node);
        return stripQuotes(((ConstantNode)node).sourceString());
    }

    /** Removes a matched pair of leading and trailing quote signs; a missing end quote is an error. */
    private String stripQuotes(String s) {
        if ( ! isQuoteSign(s.codePointAt(0))) return s;
        if ( ! isQuoteSign(s.codePointAt(s.length() - 1 )))
            throw new IllegalArgumentException("tensorflow argument [" + s + "] is missing endquote");
        return s.substring(1, s.length()-1);
    }

    private boolean isQuoteSign(int c) {
        return c == '\'' || c == '"';
    }

}
/**
 * Rewrites "tensorflow" feature references in ranking expressions to the expressions of the
 * imported TensorFlow models they point to, registering the models' constants on the rank profile.
 */
class TensorFlowFeatureConverter extends ExpressionTransformer {

    private final TensorFlowImporter tensorFlowImporter = new TensorFlowImporter();

    // The rank profile which receives the constant tensors of imported models.
    private final RankProfile profile;

    public TensorFlowFeatureConverter(RankProfile profile) {
        this.profile = profile;
    }

    @Override
    public ExpressionNode transform(ExpressionNode node) {
        if (node instanceof ReferenceNode)
            return transformFeature((ReferenceNode) node);
        else if (node instanceof CompositeNode)
            return super.transformChildren((CompositeNode) node);
        else
            return node;
    }

    /**
     * Returns the specified, existing map value, or the only map value if no key is specified.
     * Throws IllegalArgumentException in all other cases.
     */
    private <T> T chooseOrDefault(String valueDescription, Map<String, T> map, Optional<String> key) {
        if ( ! key.isPresent()) {
            if (map.size() == 0)
                throw new IllegalArgumentException("No " + valueDescription + " are present");
            if (map.size() > 1)
                throw new IllegalArgumentException("Model has multiple " + valueDescription + ", but no " +
                                                   valueDescription + " argument is specified");
            return map.values().stream().findFirst().get();
        }
        else {
            T value = map.get(key.get());
            if (value == null)
                throw new IllegalArgumentException("Model does not have the specified " + valueDescription +
                                                   " '" + key.get() + "'");
            return value;
        }
    }

    /** Returns the argument at the given index as a string, or empty if there are fewer arguments than that. */
    private Optional<String> optionalArgument(int argumentIndex, Arguments arguments) {
        if (argumentIndex >= arguments.expressions().size())
            return Optional.empty();
        return Optional.of(asString(arguments.expressions().get(argumentIndex)));
    }

    /** Returns the given constant node's source string, with any surrounding quotes stripped. */
    private String asString(ExpressionNode node) {
        if ( ! (node instanceof ConstantNode))
            throw new IllegalArgumentException("Expected a constant string as tensorflow argument, but got '" + node);
        return stripQuotes(((ConstantNode)node).sourceString());
    }

    /** Removes a matched pair of leading and trailing quote signs; a missing end quote is an error. */
    private String stripQuotes(String s) {
        if ( ! isQuoteSign(s.codePointAt(0))) return s;
        if ( ! isQuoteSign(s.codePointAt(s.length() - 1 )))
            throw new IllegalArgumentException("tensorflow argument [" + s + "] is missing endquote");
        return s.substring(1, s.length()-1);
    }

    private boolean isQuoteSign(int c) {
        return c == '\'' || c == '"';
    }

}
Not at all. See outstanding work.
/** Replaces a tensorflow(model-dir[, signature[, output]]) feature by the expression of the imported model. */
private ExpressionNode transformFeature(ReferenceNode feature) {
    try {
        if ( ! feature.getName().equals("tensorflow")) return feature;

        Arguments arguments = feature.getArguments();
        if (arguments.isEmpty())
            throw new IllegalArgumentException("A tensorflow node must take an argument pointing to " +
                                               "the tensorflow model directory under [application]/models");

        // Import the model, then resolve which signature and output to use from the optional arguments.
        ImportResult imported = tensorFlowImporter.importModel(asString(arguments.expressions().get(0)));
        ImportResult.Signature signature = chooseOrDefault("signatures", imported.signatures(),
                                                           optionalArgument(1, arguments));
        String outputName = chooseOrDefault("outputs", signature.outputs(),
                                            optionalArgument(2, arguments));

        // Make the model's constant tensors available to the rank profile before handing back its expression.
        imported.constants().forEach((name, tensor) -> profile.addConstantTensor(name, new TensorValue(tensor)));
        return imported.expressions().get(outputName).getRoot();
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Could not import tensorflow model from " + feature, e);
    }
}
result.constants().forEach((k, v) -> profile.addConstantTensor(k, new TensorValue(v)));
/** Replaces a tensorflow(model-dir[, signature[, output]]) feature by the expression of the imported model. */
private ExpressionNode transformFeature(ReferenceNode feature) {
    try {
        if ( ! feature.getName().equals("tensorflow")) return feature;

        Arguments arguments = feature.getArguments();
        if (arguments.isEmpty())
            throw new IllegalArgumentException("A tensorflow node must take an argument pointing to " +
                                               "the tensorflow model directory under [application]/models");

        // Import the model, then resolve which signature and output to use from the optional arguments.
        ImportResult imported = tensorFlowImporter.importModel(asString(arguments.expressions().get(0)));
        ImportResult.Signature signature = chooseOrDefault("signatures", imported.signatures(),
                                                           optionalArgument(1, arguments));
        String outputName = chooseOrDefault("outputs", signature.outputs(),
                                            optionalArgument(2, arguments));

        // Make the model's constant tensors available to the rank profile before handing back its expression.
        imported.constants().forEach((name, tensor) -> profile.addConstantTensor(name, new TensorValue(tensor)));
        return imported.expressions().get(outputName).getRoot();
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Could not import tensorflow model from " + feature, e);
    }
}
/**
 * Rewrites "tensorflow" feature references in ranking expressions to the expressions of the
 * imported TensorFlow models they point to, registering the models' constants on the rank profile.
 */
class TensorFlowFeatureConverter extends ExpressionTransformer {

    private final TensorFlowImporter tensorFlowImporter = new TensorFlowImporter();

    // The rank profile which receives the constant tensors of imported models.
    private final RankProfile profile;

    public TensorFlowFeatureConverter(RankProfile profile) {
        this.profile = profile;
    }

    @Override
    public ExpressionNode transform(ExpressionNode node) {
        if (node instanceof ReferenceNode)
            return transformFeature((ReferenceNode) node);
        else if (node instanceof CompositeNode)
            return super.transformChildren((CompositeNode) node);
        else
            return node;
    }

    /**
     * Returns the specified, existing map value, or the only map value if no key is specified.
     * Throws IllegalArgumentException in all other cases.
     */
    private <T> T chooseOrDefault(String valueDescription, Map<String, T> map, Optional<String> key) {
        if ( ! key.isPresent()) {
            if (map.size() == 0)
                throw new IllegalArgumentException("No " + valueDescription + " are present");
            if (map.size() > 1)
                throw new IllegalArgumentException("Model has multiple " + valueDescription + ", but no " +
                                                   valueDescription + " argument is specified");
            return map.values().stream().findFirst().get();
        }
        else {
            T value = map.get(key.get());
            if (value == null)
                throw new IllegalArgumentException("Model does not have the specified " + valueDescription +
                                                   " '" + key.get() + "'");
            return value;
        }
    }

    /** Returns the argument at the given index as a string, or empty if there are fewer arguments than that. */
    private Optional<String> optionalArgument(int argumentIndex, Arguments arguments) {
        if (argumentIndex >= arguments.expressions().size())
            return Optional.empty();
        return Optional.of(asString(arguments.expressions().get(argumentIndex)));
    }

    /** Returns the given constant node's source string, with any surrounding quotes stripped. */
    private String asString(ExpressionNode node) {
        if ( ! (node instanceof ConstantNode))
            throw new IllegalArgumentException("Expected a constant string as tensorflow argument, but got '" + node);
        return stripQuotes(((ConstantNode)node).sourceString());
    }

    /** Removes a matched pair of leading and trailing quote signs; a missing end quote is an error. */
    private String stripQuotes(String s) {
        if ( ! isQuoteSign(s.codePointAt(0))) return s;
        if ( ! isQuoteSign(s.codePointAt(s.length() - 1 )))
            throw new IllegalArgumentException("tensorflow argument [" + s + "] is missing endquote");
        return s.substring(1, s.length()-1);
    }

    private boolean isQuoteSign(int c) {
        return c == '\'' || c == '"';
    }

}
/**
 * Rewrites "tensorflow" feature references in ranking expressions to the expressions of the
 * imported TensorFlow models they point to, registering the models' constants on the rank profile.
 */
class TensorFlowFeatureConverter extends ExpressionTransformer {

    private final TensorFlowImporter tensorFlowImporter = new TensorFlowImporter();

    // The rank profile which receives the constant tensors of imported models.
    private final RankProfile profile;

    public TensorFlowFeatureConverter(RankProfile profile) {
        this.profile = profile;
    }

    @Override
    public ExpressionNode transform(ExpressionNode node) {
        if (node instanceof ReferenceNode)
            return transformFeature((ReferenceNode) node);
        else if (node instanceof CompositeNode)
            return super.transformChildren((CompositeNode) node);
        else
            return node;
    }

    /**
     * Returns the specified, existing map value, or the only map value if no key is specified.
     * Throws IllegalArgumentException in all other cases.
     */
    private <T> T chooseOrDefault(String valueDescription, Map<String, T> map, Optional<String> key) {
        if ( ! key.isPresent()) {
            if (map.size() == 0)
                throw new IllegalArgumentException("No " + valueDescription + " are present");
            if (map.size() > 1)
                throw new IllegalArgumentException("Model has multiple " + valueDescription + ", but no " +
                                                   valueDescription + " argument is specified");
            return map.values().stream().findFirst().get();
        }
        else {
            T value = map.get(key.get());
            if (value == null)
                throw new IllegalArgumentException("Model does not have the specified " + valueDescription +
                                                   " '" + key.get() + "'");
            return value;
        }
    }

    /** Returns the argument at the given index as a string, or empty if there are fewer arguments than that. */
    private Optional<String> optionalArgument(int argumentIndex, Arguments arguments) {
        if (argumentIndex >= arguments.expressions().size())
            return Optional.empty();
        return Optional.of(asString(arguments.expressions().get(argumentIndex)));
    }

    /** Returns the given constant node's source string, with any surrounding quotes stripped. */
    private String asString(ExpressionNode node) {
        if ( ! (node instanceof ConstantNode))
            throw new IllegalArgumentException("Expected a constant string as tensorflow argument, but got '" + node);
        return stripQuotes(((ConstantNode)node).sourceString());
    }

    /** Removes a matched pair of leading and trailing quote signs; a missing end quote is an error. */
    private String stripQuotes(String s) {
        if ( ! isQuoteSign(s.codePointAt(0))) return s;
        if ( ! isQuoteSign(s.codePointAt(s.length() - 1 )))
            throw new IllegalArgumentException("tensorflow argument [" + s + "] is missing endquote");
        return s.substring(1, s.length()-1);
    }

    private boolean isQuoteSign(int c) {
        return c == '\'' || c == '"';
    }

}
```suggestion return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value()); ```
/**
 * Returns the curator lock path for the given deployment.
 *
 * The environment must be part of the key in addition to the region: two deployments of the
 * same application in different environments but the same region (e.g. staging and prod in
 * us-east-1) would otherwise map to the same path and contend for the same lock.
 *
 * @param instance the deployed application instance
 * @param zone the zone (environment + region) of the deployment
 */
private Path lockPath(ApplicationId instance, ZoneId zone) {
    return lockRoot.append(instance.serializedForm() + ":" + zone.environment().value() + ":" + zone.region().value());
}
return lockRoot.append(instance.serializedForm() + ":" + zone.region().value());
/** Returns the curator lock path for the given deployment: application, environment and region joined by ':'. */
private Path lockPath(ApplicationId instance, ZoneId zone) {
    String deploymentKey = String.join(":", instance.serializedForm(),
                                            zone.environment().value(),
                                            zone.region().value());
    return lockRoot.append(deploymentKey);
}
class CuratorDb { private static final Logger log = Logger.getLogger(CuratorDb.class.getName()); private static final Duration deployLockTimeout = Duration.ofMinutes(30); private static final Duration defaultLockTimeout = Duration.ofMinutes(5); private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1); private static final Path root = Path.fromString("/controller/v1"); private static final Path lockRoot = root.append("locks"); private static final Path tenantRoot = root.append("tenants"); private static final Path applicationRoot = root.append("applications"); private static final Path jobRoot = root.append("jobs"); private static final Path controllerRoot = root.append("controllers"); private static final Path routingPoliciesRoot = root.append("routingPolicies"); private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies"); private static final Path endpointCertificateRoot = root.append("applicationCertificates"); private static final Path archiveBucketsRoot = root.append("archiveBuckets"); private static final Path changeRequestsRoot = root.append("changeRequests"); private static final Path notificationsRoot = root.append("notifications"); private static final Path supportAccessRoot = root.append("supportAccess"); private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer(); private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer); private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer(); private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer(); private final TenantSerializer tenantSerializer = new TenantSerializer(); private final ApplicationSerializer applicationSerializer = new ApplicationSerializer(); private final RunSerializer runSerializer = new RunSerializer(); private final OsVersionSerializer osVersionSerializer = new 
OsVersionSerializer(); private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer); private final OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer); private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer(); private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer); private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer(); private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer(); private final Curator curator; private final Duration tryLockTimeout; private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>(); private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>(); @Inject public CuratorDb(Curator curator) { this(curator, defaultTryLockTimeout); } CuratorDb(Curator curator, Duration tryLockTimeout) { this.curator = curator; this.tryLockTimeout = tryLockTimeout; } /** Returns all hostnames configured to be part of this ZooKeeper cluster */ public List<String> cluster() { return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(",")) .filter(hostAndPort -> !hostAndPort.isEmpty()) .map(hostAndPort -> hostAndPort.split(":")[0]) .collect(Collectors.toUnmodifiableList()); } public Lock lock(TenantName name) { return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2)); } public Lock lock(TenantAndApplicationId id) { return new MultiplePathsLock(lockPath(id), legacyLockPath(id), defaultLockTimeout.multipliedBy(2),curator); } public Lock lockForDeployment(ApplicationId id, ZoneId zone) { return new MultiplePathsLock(lockPath(id, zone), legacyLockPath(id, zone), deployLockTimeout, curator); } public Lock lock(ApplicationId id, JobType type) { return 
new MultiplePathsLock(lockPath(id, type), legacyLockPath(id, type), defaultLockTimeout, curator); } public Lock lock(ApplicationId id, JobType type, Step step) throws TimeoutException { return tryLock(lockPath(id, type, step), legacyLockPath(id, type, step)); } public Lock lockRotations() { return curator.lock(lockRoot.append("rotations"), defaultLockTimeout); } public Lock lockConfidenceOverrides() { return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout); } public Lock lockMaintenanceJob(String jobName) { try { return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName)); } catch (TimeoutException e) { throw new UncheckedTimeoutException(e); } } @SuppressWarnings("unused") public Lock lockProvisionState(String provisionStateId) { return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1)); } public Lock lockOsVersions() { return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout); } public Lock lockOsVersionStatus() { return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout); } public Lock lockRoutingPolicies() { return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout); } public Lock lockAuditLog() { return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout); } public Lock lockNameServiceQueue() { return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout); } public Lock lockMeteringRefreshTime() throws TimeoutException { return tryLock(lockRoot.append("meteringRefreshTime")); } public Lock lockArchiveBuckets(ZoneId zoneId) { return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout); } public Lock lockChangeRequests() { return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout); } public Lock lockNotifications(TenantName tenantName) { return curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout); } public Lock lockSupportAccess(DeploymentId 
deploymentId) { return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout); } public Lock lockDeploymentRetriggerQueue() { return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout); } /** Try locking with a low timeout, meaning it is OK to fail lock acquisition. * * Useful for maintenance jobs, where there is no point in running the jobs back to back. */ private Lock tryLock(Path path) throws TimeoutException { try { return curator.lock(path, tryLockTimeout); } catch (UncheckedTimeoutException e) { throw new TimeoutException(e.getMessage()); } } /** Try locking with a low timeout, meaning it is OK to fail lock acquisition. * * Useful for maintenance jobs, where there is no point in running the jobs back to back. */ private Lock tryLock(Path path, Path path2) throws TimeoutException { try { return new MultiplePathsLock(path, path2, tryLockTimeout, curator); } catch (UncheckedTimeoutException e) { throw new TimeoutException(e.getMessage()); } } private <T> Optional<T> read(Path path, Function<byte[], T> mapper) { return curator.getData(path).filter(data -> data.length > 0).map(mapper); } private Optional<Slime> readSlime(Path path) { return read(path, SlimeUtils::jsonToSlime); } private static byte[] asJson(Slime slime) { try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } public double readUpgradesPerMinute() { return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125); } public void writeUpgradesPerMinute(double n) { curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array()); } public Optional<Integer> readTargetMajorVersion() { return read(targetMajorVersionPath(), ByteBuffer::wrap).map(ByteBuffer::getInt); } public void writeTargetMajorVersion(Optional<Integer> targetMajorVersion) { if (targetMajorVersion.isPresent()) curator.set(targetMajorVersionPath(), 
ByteBuffer.allocate(Integer.BYTES).putInt(targetMajorVersion.get()).array()); else curator.delete(targetMajorVersionPath()); } public void writeVersionStatus(VersionStatus status) { curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status))); } public VersionStatus readVersionStatus() { return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty); } public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) { curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides))); } public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() { return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime) .orElseGet(Collections::emptyMap); } public void writeControllerVersion(HostName hostname, ControllerVersion version) { curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version))); } public ControllerVersion readControllerVersion(HostName hostname) { return readSlime(controllerPath(hostname.value())) .map(controllerVersionSerializer::fromSlime) .orElse(ControllerVersion.CURRENT); } public void writeOsVersionTargets(Set<OsVersionTarget> versions) { curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions))); } public Set<OsVersionTarget> readOsVersionTargets() { return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet); } public void writeOsVersionStatus(OsVersionStatus status) { curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status))); } public OsVersionStatus readOsVersionStatus() { return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty); } public void writeTenant(Tenant tenant) { curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant))); } public Optional<Tenant> 
readTenant(TenantName name) { return readSlime(tenantPath(name)).map(bytes -> tenantSerializer.tenantFrom(bytes)); } public List<Tenant> readTenants() { return readTenantNames().stream() .map(this::readTenant) .flatMap(Optional::stream) .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList)); } public List<TenantName> readTenantNames() { return curator.getChildren(tenantRoot).stream() .map(TenantName::from) .collect(Collectors.toList()); } public void removeTenant(TenantName name) { curator.delete(tenantPath(name)); } public void writeApplication(Application application) { curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application))); } public Optional<Application> readApplication(TenantAndApplicationId application) { Path path = applicationPath(application); return curator.getStat(path) .map(stat -> cachedApplications.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), read(path, applicationSerializer::fromSlime).get())).getSecond()); } public List<Application> readApplications(boolean canFail) { return readApplications(ignored -> true, canFail); } public List<Application> readApplications(TenantName name) { return readApplications(application -> application.tenant().equals(name), false); } private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) { var applicationIds = readApplicationIds(); var applications = new ArrayList<Application>(applicationIds.size()); for (var id : applicationIds) { if (!applicationFilter.test(id)) continue; try { readApplication(id).ifPresent(applications::add); } catch (Exception e) { if (canFail) { log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " + "manual intervention", e); } else { throw e; } } } return Collections.unmodifiableList(applications); } public List<TenantAndApplicationId> readApplicationIds() { return 
curator.getChildren(applicationRoot).stream() .map(TenantAndApplicationId::fromSerialized) .sorted() .collect(toUnmodifiableList()); } public void removeApplication(TenantAndApplicationId id) { curator.delete(applicationPath(id)); } public void writeLastRun(Run run) { curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run))); } public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) { Path path = runsPath(id, type); curator.set(path, asJson(runSerializer.toSlime(runs))); } public Optional<Run> readLastRun(ApplicationId id, JobType type) { return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime); } public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) { Path path = runsPath(id, type); return curator.getStat(path) .map(stat -> cachedHistoricRuns.compute(path, (__, old) -> old != null && old.getFirst() == stat.getVersion() ? old : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond()) .orElseGet(Collections::emptyNavigableMap); } public void deleteRunData(ApplicationId id, JobType type) { curator.delete(runsPath(id, type)); curator.delete(lastRunPath(id, type)); } public void deleteRunData(ApplicationId id) { curator.delete(jobRoot.append(id.serializedForm())); } public List<ApplicationId> applicationsWithJobs() { return curator.getChildren(jobRoot).stream() .map(ApplicationId::fromSerializedForm) .collect(Collectors.toList()); } public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) { return curator.getData(logPath(id, type, chunkId)); } public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) { curator.set(logPath(id, type, chunkId), log); } public void deleteLog(ApplicationId id, JobType type) { curator.delete(runsPath(id, type).append("logs")); } public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) { return curator.getData(lastLogPath(id, type)) 
.map(String::new).map(Long::parseLong); } public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) { curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes()); } public LongStream getLogChunkIds(ApplicationId id, JobType type) { return curator.getChildren(runsPath(id, type).append("logs")).stream() .mapToLong(Long::parseLong) .sorted(); } public AuditLog readAuditLog() { return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime) .orElse(AuditLog.empty); } public void writeAuditLog(AuditLog log) { curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log))); } public NameServiceQueue readNameServiceQueue() { return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime) .orElse(NameServiceQueue.EMPTY); } public void writeNameServiceQueue(NameServiceQueue queue) { curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue))); } @SuppressWarnings("unused") public Optional<byte[]> readProvisionState(String provisionId) { return curator.getData(provisionStatePath(provisionId)); } @SuppressWarnings("unused") public void writeProvisionState(String provisionId, byte[] data) { curator.set(provisionStatePath(provisionId), data); } @SuppressWarnings("unused") public List<String> readProvisionStateIds() { return curator.getChildren(provisionStatePath()); } public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) { for (var policy : policies) { if (!policy.id().owner().equals(application)) { throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " + application.toShortString()); } } curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies))); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() { return readRoutingPolicies((instance) -> true); } public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) { return 
curator.getChildren(routingPoliciesRoot).stream() .map(ApplicationId::fromSerializedForm) .filter(filter) .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies)); } public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) { return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime)) .orElseGet(List::of); } public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) { curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy))); } public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) { return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data)) .orElseGet(() -> new ZoneRoutingPolicy(zone, RoutingStatus.DEFAULT)); } public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) { curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata))); } public void deleteEndpointCertificateMetadata(ApplicationId applicationId) { curator.delete(endpointCertificatePath(applicationId)); } public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) { return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString); } public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() { Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>(); for (String appIdString : curator.getChildren(endpointCertificateRoot)) { ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString); Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId); allEndpointCertificateMetadata.put(applicationId, 
endpointCertificateMetadata.orElseThrow()); } return allEndpointCertificateMetadata; } public void writeMeteringRefreshTime(long timestamp) { curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes()); } public long readMeteringRefreshTime() { return curator.getData(meteringRefreshPath()) .map(String::new).map(Long::parseLong) .orElse(0L); } public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) { return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString) .orElseGet(Set::of); } public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) { curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets))); } public Optional<VespaChangeRequest> readChangeRequest(String changeRequestId) { return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime); } public List<VespaChangeRequest> readChangeRequests() { return curator.getChildren(changeRequestsRoot) .stream() .map(this::readChangeRequest) .flatMap(Optional::stream) .collect(Collectors.toList()); } public void writeChangeRequest(VespaChangeRequest changeRequest) { curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest))); } public void deleteChangeRequest(VespaChangeRequest changeRequest) { curator.delete(changeRequestPath(changeRequest.getId())); } public List<Notification> readNotifications(TenantName tenantName) { return readSlime(notificationsPath(tenantName)) .map(slime -> NotificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of); } public List<TenantName> listTenantsWithNotifications() { return curator.getChildren(notificationsRoot).stream() .map(TenantName::from) .collect(Collectors.toUnmodifiableList()); } public void writeNotifications(TenantName tenantName, List<Notification> notifications) { curator.set(notificationsPath(tenantName), 
asJson(NotificationsSerializer.toSlime(notifications))); } public void deleteNotifications(TenantName tenantName) { curator.delete(notificationsPath(tenantName)); } public SupportAccess readSupportAccess(DeploymentId deploymentId) { return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY); } /** Take lock before reading before writing */ public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) { curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess))); } public List<RetriggerEntry> readRetriggerEntries() { return readSlime(deploymentRetriggerPath()).map(RetriggerEntrySerializer::fromSlime).orElseGet(List::of); } public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) { curator.set(deploymentRetriggerPath(), asJson(RetriggerEntrySerializer.toSlime(retriggerEntries))); } private Path lockPath(TenantName tenant) { return lockRoot .append(tenant.value()); } private Path legacyLockPath(TenantAndApplicationId application) { return lockPath(application.tenant()) .append(application.application().value()); } private Path legacyLockPath(ApplicationId instance) { return legacyLockPath(TenantAndApplicationId.from(instance)) .append(instance.instance().value()); } private Path legacyLockPath(ApplicationId instance, ZoneId zone) { return legacyLockPath(instance) .append(zone.environment().value()) .append(zone.region().value()); } private Path legacyLockPath(ApplicationId instance, JobType type) { return legacyLockPath(instance) .append(type.jobName()); } private Path legacyLockPath(ApplicationId instance, JobType type, Step step) { return legacyLockPath(instance, type) .append(step.name()); } private Path lockPath(TenantAndApplicationId application) { return lockRoot.append(application.tenant().value() + ":" + application.application().value()); } private Path lockPath(ApplicationId instance, JobType type) { 
return lockRoot.append(instance.serializedForm() + ":" + type.jobName()); } private Path lockPath(ApplicationId instance, JobType type, Step step) { return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name()); } private Path lockPath(String provisionId) { return lockRoot .append(provisionStatePath()) .append(provisionId); } private static Path upgradesPerMinutePath() { return root.append("upgrader").append("upgradesPerMinute"); } private static Path targetMajorVersionPath() { return root.append("upgrader").append("targetMajorVersion"); } private static Path confidenceOverridesPath() { return root.append("upgrader").append("confidenceOverrides"); } private static Path osVersionTargetsPath() { return root.append("osUpgrader").append("targetVersion"); } private static Path osVersionStatusPath() { return root.append("osVersionStatus"); } private static Path versionStatusPath() { return root.append("versionStatus"); } private static Path routingPolicyPath(ApplicationId application) { return routingPoliciesRoot.append(application.serializedForm()); } private static Path zoneRoutingPolicyPath(ZoneId zone) { return zoneRoutingPoliciesRoot.append(zone.value()); } private static Path nameServiceQueuePath() { return root.append("nameServiceQueue"); } private static Path auditLogPath() { return root.append("auditLog"); } private static Path provisionStatePath() { return root.append("provisioning").append("states"); } private static Path provisionStatePath(String provisionId) { return provisionStatePath().append(provisionId); } private static Path tenantPath(TenantName name) { return tenantRoot.append(name.value()); } private static Path applicationPath(TenantAndApplicationId id) { return applicationRoot.append(id.serialized()); } private static Path runsPath(ApplicationId id, JobType type) { return jobRoot.append(id.serializedForm()).append(type.jobName()); } private static Path lastRunPath(ApplicationId id, JobType type) { return runsPath(id, 
type).append("last"); } private static Path logPath(ApplicationId id, JobType type, long first) { return runsPath(id, type).append("logs").append(Long.toString(first)); } private static Path lastLogPath(ApplicationId id, JobType type) { return runsPath(id, type).append("logs"); } private static Path controllerPath(String hostname) { return controllerRoot.append(hostname); } private static Path endpointCertificatePath(ApplicationId id) { return endpointCertificateRoot.append(id.serializedForm()); } private static Path meteringRefreshPath() { return root.append("meteringRefreshTime"); } private static Path archiveBucketsPath(ZoneId zoneId) { return archiveBucketsRoot.append(zoneId.value()); } private static Path changeRequestPath(String id) { return changeRequestsRoot.append(id); } private static Path notificationsPath(TenantName tenantName) { return notificationsRoot.append(tenantName.value()); } private static Path supportAccessPath(DeploymentId deploymentId) { return supportAccessRoot.append(deploymentId.dottedString()); } private static Path deploymentRetriggerPath() { return root.append("deploymentRetriggerQueue"); } }
/**
 * Curator/ZooKeeper-backed persistence for controller state: tenants, applications,
 * job runs and logs, version status, routing policies, certificates, notifications,
 * and a family of named inter-process locks. All payloads are stored as JSON (Slime)
 * except a few raw byte/number entries noted below.
 *
 * Thread-safety: the only mutable state is two ConcurrentHashMap caches keyed by
 * ZooKeeper path and invalidated via the node's stat version.
 */
class CuratorDb {

    private static final Logger log = Logger.getLogger(CuratorDb.class.getName());

    // Lock timeouts: deployments get a long window; tryLock-style acquisition is near-immediate.
    private static final Duration deployLockTimeout = Duration.ofMinutes(30);
    private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
    private static final Duration defaultTryLockTimeout = Duration.ofSeconds(1);

    // ZooKeeper layout: everything lives under /controller/v1.
    private static final Path root = Path.fromString("/controller/v1");
    private static final Path lockRoot = root.append("locks");
    private static final Path tenantRoot = root.append("tenants");
    private static final Path applicationRoot = root.append("applications");
    private static final Path jobRoot = root.append("jobs");
    private static final Path controllerRoot = root.append("controllers");
    private static final Path routingPoliciesRoot = root.append("routingPolicies");
    private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies");
    private static final Path endpointCertificateRoot = root.append("applicationCertificates");
    private static final Path archiveBucketsRoot = root.append("archiveBuckets");
    private static final Path changeRequestsRoot = root.append("changeRequests");
    private static final Path notificationsRoot = root.append("notifications");
    private static final Path supportAccessRoot = root.append("supportAccess");

    // One stateless serializer per stored entity type.
    private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer();
    private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer);
    private final ControllerVersionSerializer controllerVersionSerializer = new ControllerVersionSerializer();
    private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
    private final TenantSerializer tenantSerializer = new TenantSerializer();
    private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
    private final RunSerializer runSerializer = new RunSerializer();
    private final OsVersionSerializer osVersionSerializer = new OsVersionSerializer();
    private final OsVersionTargetSerializer osVersionTargetSerializer = new OsVersionTargetSerializer(osVersionSerializer);
    private final OsVersionStatusSerializer osVersionStatusSerializer = new OsVersionStatusSerializer(osVersionSerializer, nodeVersionSerializer);
    private final RoutingPolicySerializer routingPolicySerializer = new RoutingPolicySerializer();
    private final ZoneRoutingPolicySerializer zoneRoutingPolicySerializer = new ZoneRoutingPolicySerializer(routingPolicySerializer);
    private final AuditLogSerializer auditLogSerializer = new AuditLogSerializer();
    private final NameServiceQueueSerializer nameServiceQueueSerializer = new NameServiceQueueSerializer();

    private final Curator curator;
    private final Duration tryLockTimeout;

    // Read caches keyed by ZK path; the Integer is the node's stat version at read time,
    // so a cached entry is reused only while the stored data is unchanged.
    private final Map<Path, Pair<Integer, Application>> cachedApplications = new ConcurrentHashMap<>();
    private final Map<Path, Pair<Integer, NavigableMap<RunId, Run>>> cachedHistoricRuns = new ConcurrentHashMap<>();

    @Inject
    public CuratorDb(Curator curator) {
        this(curator, defaultTryLockTimeout);
    }

    // Package-private for tests: allows a custom tryLock timeout.
    CuratorDb(Curator curator, Duration tryLockTimeout) {
        this.curator = curator;
        this.tryLockTimeout = tryLockTimeout;
    }

    /** Returns all hostnames configured to be part of this ZooKeeper cluster. */
    public List<String> cluster() {
        return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(","))
                     .filter(hostAndPort -> !hostAndPort.isEmpty())
                     .map(hostAndPort -> hostAndPort.split(":")[0]) // keep host, drop port
                     .collect(Collectors.toUnmodifiableList());
    }

    // ---------------- Locks ----------------

    public Lock lock(TenantName name) {
        return curator.lock(lockPath(name), defaultLockTimeout.multipliedBy(2));
    }

    // MultiplePathsLock holds both the new-style and legacy lock paths during migration.
    public Lock lock(TenantAndApplicationId id) {
        return new MultiplePathsLock(lockPath(id), legacyLockPath(id), defaultLockTimeout.multipliedBy(2), curator);
    }

    // NOTE(review): no lockPath(ApplicationId, ZoneId) overload is visible in this class,
    // only legacyLockPath(ApplicationId, ZoneId) — confirm the overload exists elsewhere.
    public Lock lockForDeployment(ApplicationId id, ZoneId zone) {
        return new MultiplePathsLock(lockPath(id, zone), legacyLockPath(id, zone), deployLockTimeout, curator);
    }

    public Lock lock(ApplicationId id, JobType type) {
        return new MultiplePathsLock(lockPath(id, type), legacyLockPath(id, type), defaultLockTimeout, curator);
    }

    public Lock lock(ApplicationId id, JobType type, Step step) throws TimeoutException {
        return tryLock(lockPath(id, type, step), legacyLockPath(id, type, step));
    }

    public Lock lockRotations() {
        return curator.lock(lockRoot.append("rotations"), defaultLockTimeout);
    }

    public Lock lockConfidenceOverrides() {
        return curator.lock(lockRoot.append("confidenceOverrides"), defaultLockTimeout);
    }

    // Wraps the checked TimeoutException: maintenance jobs treat a busy lock as unchecked.
    public Lock lockMaintenanceJob(String jobName) {
        try {
            return tryLock(lockRoot.append("maintenanceJobLocks").append(jobName));
        }
        catch (TimeoutException e) {
            throw new UncheckedTimeoutException(e);
        }
    }

    @SuppressWarnings("unused") // Called by internal code
    public Lock lockProvisionState(String provisionStateId) {
        return curator.lock(lockPath(provisionStateId), Duration.ofSeconds(1));
    }

    public Lock lockOsVersions() {
        return curator.lock(lockRoot.append("osTargetVersion"), defaultLockTimeout);
    }

    public Lock lockOsVersionStatus() {
        return curator.lock(lockRoot.append("osVersionStatus"), defaultLockTimeout);
    }

    public Lock lockRoutingPolicies() {
        return curator.lock(lockRoot.append("routingPolicies"), defaultLockTimeout);
    }

    public Lock lockAuditLog() {
        return curator.lock(lockRoot.append("auditLog"), defaultLockTimeout);
    }

    public Lock lockNameServiceQueue() {
        return curator.lock(lockRoot.append("nameServiceQueue"), defaultLockTimeout);
    }

    public Lock lockMeteringRefreshTime() throws TimeoutException {
        return tryLock(lockRoot.append("meteringRefreshTime"));
    }

    public Lock lockArchiveBuckets(ZoneId zoneId) {
        return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout);
    }

    public Lock lockChangeRequests() {
        return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout);
    }

    public Lock lockNotifications(TenantName tenantName) {
        return curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout);
    }

    public Lock lockSupportAccess(DeploymentId deploymentId) {
        return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout);
    }

    public Lock lockDeploymentRetriggerQueue() {
        return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout);
    }

    /**
     * Tries locking with a low timeout, meaning it is OK to fail lock acquisition.
     * Useful for maintenance jobs, where there is no point in running the jobs back to back.
     *
     * @throws TimeoutException if the lock could not be acquired within {@code tryLockTimeout}
     */
    private Lock tryLock(Path path) throws TimeoutException {
        try {
            return curator.lock(path, tryLockTimeout);
        }
        catch (UncheckedTimeoutException e) {
            throw new TimeoutException(e.getMessage());
        }
    }

    /**
     * Tries locking both the given paths with a low timeout, meaning it is OK to fail
     * lock acquisition. Useful for maintenance jobs, where there is no point in running
     * the jobs back to back.
     */
    private Lock tryLock(Path path, Path path2) throws TimeoutException {
        try {
            return new MultiplePathsLock(path, path2, tryLockTimeout, curator);
        }
        catch (UncheckedTimeoutException e) {
            throw new TimeoutException(e.getMessage());
        }
    }

    // ---------------- Generic read/write helpers ----------------

    /** Reads and maps the data at the given path; empty if the node is missing or has no data. */
    private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
        return curator.getData(path).filter(data -> data.length > 0).map(mapper);
    }

    private Optional<Slime> readSlime(Path path) {
        return read(path, SlimeUtils::jsonToSlime);
    }

    private static byte[] asJson(Slime slime) {
        try {
            return SlimeUtils.toJsonBytes(slime);
        }
        catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    // ---------------- Upgrader settings (raw-byte encoded, not JSON) ----------------

    /** Defaults to 0.125 upgrades per minute when unset. */
    public double readUpgradesPerMinute() {
        return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.125);
    }

    public void writeUpgradesPerMinute(double n) {
        curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
    }

    public Optional<Integer> readTargetMajorVersion() {
        return read(targetMajorVersionPath(), ByteBuffer::wrap).map(ByteBuffer::getInt);
    }

    /** An empty argument clears the stored target major version. */
    public void writeTargetMajorVersion(Optional<Integer> targetMajorVersion) {
        if (targetMajorVersion.isPresent())
            curator.set(targetMajorVersionPath(), ByteBuffer.allocate(Integer.BYTES).putInt(targetMajorVersion.get()).array());
        else
            curator.delete(targetMajorVersionPath());
    }

    // ---------------- Version status ----------------

    public void writeVersionStatus(VersionStatus status) {
        curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
    }

    public VersionStatus readVersionStatus() {
        return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
    }

    public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
        curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
    }

    public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
        return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
                                                   .orElseGet(Collections::emptyMap);
    }

    public void writeControllerVersion(HostName hostname, ControllerVersion version) {
        curator.set(controllerPath(hostname.value()), asJson(controllerVersionSerializer.toSlime(version)));
    }

    public ControllerVersion readControllerVersion(HostName hostname) {
        return readSlime(controllerPath(hostname.value()))
                .map(controllerVersionSerializer::fromSlime)
                .orElse(ControllerVersion.CURRENT);
    }

    public void writeOsVersionTargets(Set<OsVersionTarget> versions) {
        curator.set(osVersionTargetsPath(), asJson(osVersionTargetSerializer.toSlime(versions)));
    }

    public Set<OsVersionTarget> readOsVersionTargets() {
        return readSlime(osVersionTargetsPath()).map(osVersionTargetSerializer::fromSlime).orElseGet(Collections::emptySet);
    }

    public void writeOsVersionStatus(OsVersionStatus status) {
        curator.set(osVersionStatusPath(), asJson(osVersionStatusSerializer.toSlime(status)));
    }

    public OsVersionStatus readOsVersionStatus() {
        return readSlime(osVersionStatusPath()).map(osVersionStatusSerializer::fromSlime).orElse(OsVersionStatus.empty);
    }

    // ---------------- Tenants ----------------

    public void writeTenant(Tenant tenant) {
        curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
    }

    public Optional<Tenant> readTenant(TenantName name) {
        return readSlime(tenantPath(name)).map(bytes -> tenantSerializer.tenantFrom(bytes));
    }

    public List<Tenant> readTenants() {
        return readTenantNames().stream()
                                .map(this::readTenant)
                                .flatMap(Optional::stream)
                                .collect(collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
    }

    public List<TenantName> readTenantNames() {
        return curator.getChildren(tenantRoot).stream()
                      .map(TenantName::from)
                      .collect(Collectors.toList());
    }

    public void removeTenant(TenantName name) {
        curator.delete(tenantPath(name));
    }

    // ---------------- Applications ----------------

    public void writeApplication(Application application) {
        curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
    }

    /**
     * Reads an application, served from cache when the node's stat version is unchanged
     * since the cached read; empty if the node does not exist.
     */
    public Optional<Application> readApplication(TenantAndApplicationId application) {
        Path path = applicationPath(application);
        return curator.getStat(path)
                      .map(stat -> cachedApplications.compute(path, (__, old) ->
                              old != null && old.getFirst() == stat.getVersion()
                                      ? old
                                      : new Pair<>(stat.getVersion(), read(path, applicationSerializer::fromSlime).get())).getSecond());
    }

    public List<Application> readApplications(boolean canFail) {
        return readApplications(ignored -> true, canFail);
    }

    public List<Application> readApplications(TenantName name) {
        return readApplications(application -> application.tenant().equals(name), false);
    }

    /**
     * Reads all applications matching the filter. With {@code canFail}, an unreadable
     * application is logged at SEVERE and skipped instead of aborting the whole read.
     */
    private List<Application> readApplications(Predicate<TenantAndApplicationId> applicationFilter, boolean canFail) {
        var applicationIds = readApplicationIds();
        var applications = new ArrayList<Application>(applicationIds.size());
        for (var id : applicationIds) {
            if (!applicationFilter.test(id)) continue;
            try {
                readApplication(id).ifPresent(applications::add);
            } catch (Exception e) {
                if (canFail) {
                    log.log(Level.SEVERE, "Failed to read application '" + id + "', this must be fixed through " +
                                          "manual intervention", e);
                } else {
                    throw e;
                }
            }
        }
        return Collections.unmodifiableList(applications);
    }

    public List<TenantAndApplicationId> readApplicationIds() {
        return curator.getChildren(applicationRoot).stream()
                      .map(TenantAndApplicationId::fromSerialized)
                      .sorted()
                      .collect(toUnmodifiableList());
    }

    public void removeApplication(TenantAndApplicationId id) {
        curator.delete(applicationPath(id));
    }

    // ---------------- Job runs and logs ----------------

    public void writeLastRun(Run run) {
        curator.set(lastRunPath(run.id().application(), run.id().type()), asJson(runSerializer.toSlime(run)));
    }

    public void writeHistoricRuns(ApplicationId id, JobType type, Iterable<Run> runs) {
        Path path = runsPath(id, type);
        curator.set(path, asJson(runSerializer.toSlime(runs)));
    }

    public Optional<Run> readLastRun(ApplicationId id, JobType type) {
        return readSlime(lastRunPath(id, type)).map(runSerializer::runFromSlime);
    }

    /** Reads historic runs, cached by the node's stat version like {@code readApplication}. */
    public NavigableMap<RunId, Run> readHistoricRuns(ApplicationId id, JobType type) {
        Path path = runsPath(id, type);
        return curator.getStat(path)
                      .map(stat -> cachedHistoricRuns.compute(path, (__, old) ->
                              old != null && old.getFirst() == stat.getVersion()
                                      ? old
                                      : new Pair<>(stat.getVersion(), runSerializer.runsFromSlime(readSlime(path).get()))).getSecond())
                      .orElseGet(Collections::emptyNavigableMap);
    }

    public void deleteRunData(ApplicationId id, JobType type) {
        curator.delete(runsPath(id, type));
        curator.delete(lastRunPath(id, type));
    }

    public void deleteRunData(ApplicationId id) {
        curator.delete(jobRoot.append(id.serializedForm()));
    }

    public List<ApplicationId> applicationsWithJobs() {
        return curator.getChildren(jobRoot).stream()
                      .map(ApplicationId::fromSerializedForm)
                      .collect(Collectors.toList());
    }

    public Optional<byte[]> readLog(ApplicationId id, JobType type, long chunkId) {
        return curator.getData(logPath(id, type, chunkId));
    }

    public void writeLog(ApplicationId id, JobType type, long chunkId, byte[] log) {
        curator.set(logPath(id, type, chunkId), log);
    }

    public void deleteLog(ApplicationId id, JobType type) {
        curator.delete(runsPath(id, type).append("logs"));
    }

    public Optional<Long> readLastLogEntryId(ApplicationId id, JobType type) {
        return curator.getData(lastLogPath(id, type))
                      .map(String::new).map(Long::parseLong);
    }

    public void writeLastLogEntryId(ApplicationId id, JobType type, long lastId) {
        curator.set(lastLogPath(id, type), Long.toString(lastId).getBytes());
    }

    public LongStream getLogChunkIds(ApplicationId id, JobType type) {
        return curator.getChildren(runsPath(id, type).append("logs")).stream()
                      .mapToLong(Long::parseLong)
                      .sorted();
    }

    // ---------------- Audit log ----------------

    public AuditLog readAuditLog() {
        return readSlime(auditLogPath()).map(auditLogSerializer::fromSlime)
                                        .orElse(AuditLog.empty);
    }

    public void writeAuditLog(AuditLog log) {
        curator.set(auditLogPath(), asJson(auditLogSerializer.toSlime(log)));
    }

    // ---------------- Name service queue ----------------

    public NameServiceQueue readNameServiceQueue() {
        return readSlime(nameServiceQueuePath()).map(nameServiceQueueSerializer::fromSlime)
                                                .orElse(NameServiceQueue.EMPTY);
    }

    public void writeNameServiceQueue(NameServiceQueue queue) {
        curator.set(nameServiceQueuePath(), asJson(nameServiceQueueSerializer.toSlime(queue)));
    }

    // ---------------- Provisioning (raw bytes, opaque to this class) ----------------

    @SuppressWarnings("unused")
    public Optional<byte[]> readProvisionState(String provisionId) {
        return curator.getData(provisionStatePath(provisionId));
    }

    @SuppressWarnings("unused")
    public void writeProvisionState(String provisionId, byte[] data) {
        curator.set(provisionStatePath(provisionId), data);
    }

    @SuppressWarnings("unused")
    public List<String> readProvisionStateIds() {
        return curator.getChildren(provisionStatePath());
    }

    // ---------------- Routing policies ----------------

    /** @throws IllegalArgumentException if any policy is owned by another application */
    public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) {
        for (var policy : policies) {
            if (!policy.id().owner().equals(application)) {
                throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " +
                                                   application.toShortString());
            }
        }
        curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies)));
    }

    public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() {
        return readRoutingPolicies((instance) -> true);
    }

    public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) {
        return curator.getChildren(routingPoliciesRoot).stream()
                      .map(ApplicationId::fromSerializedForm)
                      .filter(filter)
                      .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies));
    }

    public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) {
        return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime))
                                                        .orElseGet(List::of);
    }

    public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) {
        curator.set(zoneRoutingPolicyPath(policy.zone()), asJson(zoneRoutingPolicySerializer.toSlime(policy)));
    }

    public ZoneRoutingPolicy readZoneRoutingPolicy(ZoneId zone) {
        return readSlime(zoneRoutingPolicyPath(zone)).map(data -> zoneRoutingPolicySerializer.fromSlime(zone, data))
                                                     .orElseGet(() -> new ZoneRoutingPolicy(zone, RoutingStatus.DEFAULT));
    }

    // ---------------- Endpoint certificates ----------------

    public void writeEndpointCertificateMetadata(ApplicationId applicationId, EndpointCertificateMetadata endpointCertificateMetadata) {
        curator.set(endpointCertificatePath(applicationId), asJson(EndpointCertificateMetadataSerializer.toSlime(endpointCertificateMetadata)));
    }

    public void deleteEndpointCertificateMetadata(ApplicationId applicationId) {
        curator.delete(endpointCertificatePath(applicationId));
    }

    public Optional<EndpointCertificateMetadata> readEndpointCertificateMetadata(ApplicationId applicationId) {
        return curator.getData(endpointCertificatePath(applicationId)).map(String::new).map(EndpointCertificateMetadataSerializer::fromJsonString);
    }

    // NOTE(review): orElseThrow() here assumes no child node vanishes between listing
    // and reading — a concurrent delete would make this method throw. Confirm intended.
    public Map<ApplicationId, EndpointCertificateMetadata> readAllEndpointCertificateMetadata() {
        Map<ApplicationId, EndpointCertificateMetadata> allEndpointCertificateMetadata = new HashMap<>();
        for (String appIdString : curator.getChildren(endpointCertificateRoot)) {
            ApplicationId applicationId = ApplicationId.fromSerializedForm(appIdString);
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata = readEndpointCertificateMetadata(applicationId);
            allEndpointCertificateMetadata.put(applicationId, endpointCertificateMetadata.orElseThrow());
        }
        return allEndpointCertificateMetadata;
    }

    // ---------------- Metering ----------------

    public void writeMeteringRefreshTime(long timestamp) {
        curator.set(meteringRefreshPath(), Long.toString(timestamp).getBytes());
    }

    /** Defaults to 0 when never written. */
    public long readMeteringRefreshTime() {
        return curator.getData(meteringRefreshPath())
                      .map(String::new).map(Long::parseLong)
                      .orElse(0L);
    }

    // ---------------- Archive buckets ----------------

    public Set<ArchiveBucket> readArchiveBuckets(ZoneId zoneId) {
        return curator.getData(archiveBucketsPath(zoneId)).map(String::new).map(ArchiveBucketsSerializer::fromJsonString)
                      .orElseGet(Set::of);
    }

    public void writeArchiveBuckets(ZoneId zoneid, Set<ArchiveBucket> archiveBuckets) {
        curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets)));
    }

    // ---------------- Change requests ----------------

    public Optional<VespaChangeRequest> readChangeRequest(String changeRequestId) {
        return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime);
    }

    public List<VespaChangeRequest> readChangeRequests() {
        return curator.getChildren(changeRequestsRoot)
                      .stream()
                      .map(this::readChangeRequest)
                      .flatMap(Optional::stream)
                      .collect(Collectors.toList());
    }

    public void writeChangeRequest(VespaChangeRequest changeRequest) {
        curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest)));
    }

    public void deleteChangeRequest(VespaChangeRequest changeRequest) {
        curator.delete(changeRequestPath(changeRequest.getId()));
    }

    // ---------------- Notifications ----------------

    public List<Notification> readNotifications(TenantName tenantName) {
        return readSlime(notificationsPath(tenantName))
                .map(slime -> NotificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of);
    }

    public List<TenantName> listTenantsWithNotifications() {
        return curator.getChildren(notificationsRoot).stream()
                      .map(TenantName::from)
                      .collect(Collectors.toUnmodifiableList());
    }

    public void writeNotifications(TenantName tenantName, List<Notification> notifications) {
        curator.set(notificationsPath(tenantName), asJson(NotificationsSerializer.toSlime(notifications)));
    }

    public void deleteNotifications(TenantName tenantName) {
        curator.delete(notificationsPath(tenantName));
    }

    // ---------------- Support access ----------------

    public SupportAccess readSupportAccess(DeploymentId deploymentId) {
        return readSlime(supportAccessPath(deploymentId)).map(SupportAccessSerializer::fromSlime).orElse(SupportAccess.DISALLOWED_NO_HISTORY);
    }

    /** Callers must take the support access lock before reading, and hold it until after writing. */
    public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) {
        curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess)));
    }

    // ---------------- Deployment retrigger queue ----------------

    public List<RetriggerEntry> readRetriggerEntries() {
        return readSlime(deploymentRetriggerPath()).map(RetriggerEntrySerializer::fromSlime).orElseGet(List::of);
    }

    public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) {
        curator.set(deploymentRetriggerPath(), asJson(RetriggerEntrySerializer.toSlime(retriggerEntries)));
    }

    // ---------------- Lock paths ----------------
    // New-style lock paths join components with ':' in a single node name; the legacy
    // paths nest one node per component. Both are held during the migration period.

    private Path lockPath(TenantName tenant) {
        return lockRoot
                .append(tenant.value());
    }

    private Path legacyLockPath(TenantAndApplicationId application) {
        return lockPath(application.tenant())
                .append(application.application().value());
    }

    private Path legacyLockPath(ApplicationId instance) {
        return legacyLockPath(TenantAndApplicationId.from(instance))
                .append(instance.instance().value());
    }

    private Path legacyLockPath(ApplicationId instance, ZoneId zone) {
        return legacyLockPath(instance)
                .append(zone.environment().value())
                .append(zone.region().value());
    }

    private Path legacyLockPath(ApplicationId instance, JobType type) {
        return legacyLockPath(instance)
                .append(type.jobName());
    }

    private Path legacyLockPath(ApplicationId instance, JobType type, Step step) {
        return legacyLockPath(instance, type)
                .append(step.name());
    }

    private Path lockPath(TenantAndApplicationId application) {
        return lockRoot.append(application.tenant().value() + ":" + application.application().value());
    }

    private Path lockPath(ApplicationId instance, JobType type) {
        return lockRoot.append(instance.serializedForm() + ":" + type.jobName());
    }

    private Path lockPath(ApplicationId instance, JobType type, Step step) {
        return lockRoot.append(instance.serializedForm() + ":" + type.jobName() + ":" + step.name());
    }

    private Path lockPath(String provisionId) {
        return lockRoot
                .append(provisionStatePath())
                .append(provisionId);
    }

    // ---------------- Data paths ----------------

    private static Path upgradesPerMinutePath() {
        return root.append("upgrader").append("upgradesPerMinute");
    }

    private static Path targetMajorVersionPath() {
        return root.append("upgrader").append("targetMajorVersion");
    }

    private static Path confidenceOverridesPath() {
        return root.append("upgrader").append("confidenceOverrides");
    }

    private static Path osVersionTargetsPath() {
        return root.append("osUpgrader").append("targetVersion");
    }

    private static Path osVersionStatusPath() {
        return root.append("osVersionStatus");
    }

    private static Path versionStatusPath() {
        return root.append("versionStatus");
    }

    private static Path routingPolicyPath(ApplicationId application) {
        return routingPoliciesRoot.append(application.serializedForm());
    }

    private static Path zoneRoutingPolicyPath(ZoneId zone) {
        return zoneRoutingPoliciesRoot.append(zone.value());
    }

    private static Path nameServiceQueuePath() {
        return root.append("nameServiceQueue");
    }

    private static Path auditLogPath() {
        return root.append("auditLog");
    }

    private static Path provisionStatePath() {
        return root.append("provisioning").append("states");
    }

    private static Path provisionStatePath(String provisionId) {
        return provisionStatePath().append(provisionId);
    }

    private static Path tenantPath(TenantName name) {
        return tenantRoot.append(name.value());
    }

    private static Path applicationPath(TenantAndApplicationId id) {
        return applicationRoot.append(id.serialized());
    }

    private static Path runsPath(ApplicationId id, JobType type) {
        return jobRoot.append(id.serializedForm()).append(type.jobName());
    }

    private static Path lastRunPath(ApplicationId id, JobType type) {
        return runsPath(id, type).append("last");
    }

    private static Path logPath(ApplicationId id, JobType type, long first) {
        return runsPath(id, type).append("logs").append(Long.toString(first));
    }

    private static Path lastLogPath(ApplicationId id, JobType type) {
        return runsPath(id, type).append("logs");
    }

    private static Path controllerPath(String hostname) {
        return controllerRoot.append(hostname);
    }

    private static Path endpointCertificatePath(ApplicationId id) {
        return endpointCertificateRoot.append(id.serializedForm());
    }

    private static Path meteringRefreshPath() {
        return root.append("meteringRefreshTime");
    }

    private static Path archiveBucketsPath(ZoneId zoneId) {
        return archiveBucketsRoot.append(zoneId.value());
    }

    private static Path changeRequestPath(String id) {
        return changeRequestsRoot.append(id);
    }

    private static Path notificationsPath(TenantName tenantName) {
        return notificationsRoot.append(tenantName.value());
    }

    private static Path supportAccessPath(DeploymentId deploymentId) {
        return supportAccessRoot.append(deploymentId.dottedString());
    }

    private static Path deploymentRetriggerPath() {
        return root.append("deploymentRetriggerQueue");
    }

}
// FIXME: stray editorial note ("Huh, thought I deleted this ...") — this line is not valid
// Java and appears to be leftover residue from an earlier edit; confirm the duplicated
// CuratorDb text above is dead and remove both the note and the duplicate.
/**
 * Creates a NodeFailer wired to this tester's collaborators (deployer, liveness tracker,
 * service monitor, node repository, clock, orchestrator and metric), using the hosted
 * throttle policy, the one-hour downtime limit, and a fresh JobControl backed by the
 * node repository's database.
 *
 * Fix: the previous version re-initialized {@code metric.values} here, silently wiping
 * any metrics a test had already recorded whenever a new failer was created. Constructing
 * a collaborator should not mutate shared test state, so that side effect is removed;
 * the metric's map is initialized where the metric itself is created.
 */
public NodeFailer createFailer() {
    return new NodeFailer(deployer, hostLivenessTracker, serviceMonitor, nodeRepository,
                          downtimeLimitOneHour, clock, orchestrator,
                          NodeFailer.ThrottlePolicy.hosted, metric,
                          new JobControl(nodeRepository.database()));
}
metric.values = new HashMap<>();
/**
 * Builds a NodeFailer from this tester's collaborators: the deployer, host liveness
 * tracker, service monitor, node repository, clock, orchestrator and metric, with the
 * hosted throttle policy and the one-hour downtime limit.
 */
public NodeFailer createFailer() {
    // Job control is backed by the same database as the node repository under test.
    JobControl jobControl = new JobControl(nodeRepository.database());
    return new NodeFailer(deployer,
                          hostLivenessTracker,
                          serviceMonitor,
                          nodeRepository,
                          downtimeLimitOneHour,
                          clock,
                          orchestrator,
                          NodeFailer.ThrottlePolicy.hosted,
                          metric,
                          jobControl);
}
/**
 * Test fixture that wires a real NodeRepository together with mock/stub collaborators
 * (MockCurator, MockDeployer, ServiceMonitorStub, ManualClock, OrchestratorMock) for
 * exercising a NodeFailer. Use one of the static with*() factories to get a fully wired
 * instance; each factory activates applications and sanity-checks the resulting node counts.
 */
class NodeFailTester {

    // Shared application ids used across the factory setups.
    public static final ApplicationId nodeAdminApp = ApplicationId.from(TenantName.from("hosted-vespa"), ApplicationName.from("routing"), InstanceName.from("default"));
    public static final ApplicationId app1 = ApplicationId.from(TenantName.from("foo1"), ApplicationName.from("bar"), InstanceName.from("fuz"));
    public static final ApplicationId app2 = ApplicationId.from(TenantName.from("foo2"), ApplicationName.from("bar"), InstanceName.from("fuz"));
    public static final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default", "docker");
    private static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
    private static final Duration downtimeLimitOneHour = Duration.ofMinutes(60);

    // Public mutable fields: tests reach in directly to drive the clock, repository and stubs.
    public final ManualClock clock;
    public final NodeRepository nodeRepository;
    public NodeFailer failer;
    public ServiceMonitorStub serviceMonitor;
    public MockDeployer deployer;
    public MetricsReporterTest.TestMetric metric;
    private final TestHostLivenessTracker hostLivenessTracker;
    private final Orchestrator orchestrator;
    private final NodeRepositoryProvisioner provisioner;
    private final Curator curator;

    // Private: instances are only obtained through the with*() factories below.
    private NodeFailTester() {
        clock = new ManualClock();
        curator = new MockCurator();
        nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
                                            new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
        provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
        hostLivenessTracker = new TestHostLivenessTracker(clock);
        orchestrator = new OrchestratorMock();
    }

    /** Fixture with two activated tenant applications (5 and 7 nodes) on the "default" flavor. */
    public static NodeFailTester withTwoApplications() {
        NodeFailTester tester = new NodeFailTester();
        tester.createReadyNodes(16);
        tester.createHostNodes(3);
        ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"));
        ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"));
        int wantedNodesApp1 = 5;
        int wantedNodesApp2 = 7;
        tester.activate(app1, clusterApp1, wantedNodesApp1);
        tester.activate(app2, clusterApp2, wantedNodesApp2);
        // Sanity-check that activation gave each application exactly what it asked for.
        assertEquals(wantedNodesApp1, tester.nodeRepository.getNodes(app1, Node.State.active).size());
        assertEquals(wantedNodesApp2, tester.nodeRepository.getNodes(app2, Node.State.active).size());
        Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
        apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, Capacity.fromNodeCount(wantedNodesApp1, Optional.of("default")), 1));
        apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2, Capacity.fromNodeCount(wantedNodesApp2, Optional.of("default")), 1));
        tester.deployer = new MockDeployer(tester.provisioner, apps);
        tester.serviceMonitor = new ServiceMonitorStub(apps, tester.nodeRepository);
        tester.metric = new MetricsReporterTest.TestMetric();
        tester.failer = tester.createFailer();
        return tester;
    }

    /** Fixture with docker-flavored tenant nodes (3 per host) plus a node-admin app covering all hosts. */
    public static NodeFailTester withTwoApplicationsOnDocker(int numberOfHosts) {
        NodeFailTester tester = new NodeFailTester();
        int nodesPerHost = 3;
        List<Node> hosts = tester.createHostNodes(numberOfHosts);
        for (int i = 0; i < hosts.size(); i++) {
            // Child nodes are parented to "parent<i>" so they land on distinct hosts.
            tester.createReadyNodes(nodesPerHost, i * nodesPerHost, Optional.of("parent" + i),
                                    nodeFlavors.getFlavorOrThrow("docker"), NodeType.tenant);
        }
        ClusterSpec clusterNodeAdminApp = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"));
        ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.75.0"));
        ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.75.0"));
        Capacity allHosts = Capacity.fromRequiredNodeType(NodeType.host);
        Capacity capacity1 = Capacity.fromNodeCount(3, Optional.of("docker"));
        Capacity capacity2 = Capacity.fromNodeCount(5, Optional.of("docker"));
        tester.activate(nodeAdminApp, clusterNodeAdminApp, allHosts);
        tester.activate(app1, clusterApp1, capacity1);
        tester.activate(app2, clusterApp2, capacity2);
        // node-admin must own every host node; the tenant apps get their requested counts.
        assertEquals(new HashSet<>(tester.nodeRepository.getNodes(NodeType.host)),
                     new HashSet<>(tester.nodeRepository.getNodes(nodeAdminApp, Node.State.active)));
        assertEquals(capacity1.nodeCount(), tester.nodeRepository.getNodes(app1, Node.State.active).size());
        assertEquals(capacity2.nodeCount(), tester.nodeRepository.getNodes(app2, Node.State.active).size());
        Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
        apps.put(nodeAdminApp, new MockDeployer.ApplicationContext(nodeAdminApp, clusterNodeAdminApp, allHosts, 1));
        apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, capacity1, 1));
        apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2, capacity2, 1));
        tester.deployer = new MockDeployer(tester.provisioner, apps);
        tester.serviceMonitor = new ServiceMonitorStub(apps, tester.nodeRepository);
        tester.metric = new MetricsReporterTest.TestMetric();
        tester.failer = tester.createFailer();
        return tester;
    }

    /** Fixture where a single application owns all 16 proxy nodes. */
    public static NodeFailTester withProxyApplication() {
        NodeFailTester tester = new NodeFailTester();
        tester.createReadyNodes(16, NodeType.proxy);
        Capacity allProxies = Capacity.fromRequiredNodeType(NodeType.proxy);
        ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"));
        tester.activate(app1, clusterApp1, allProxies);
        assertEquals(16, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size());
        Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
        apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, allProxies, 1));
        tester.deployer = new MockDeployer(tester.provisioner, apps);
        tester.serviceMonitor = new ServiceMonitorStub(apps, tester.nodeRepository);
        tester.metric = new MetricsReporterTest.TestMetric();
        tester.failer = tester.createFailer();
        return tester;
    }

    /** Fixture with no nodes and no applications activated. */
    public static NodeFailTester withNoApplications() {
        NodeFailTester tester = new NodeFailTester();
        tester.deployer = new MockDeployer(tester.provisioner, Collections.emptyMap());
        tester.serviceMonitor = new ServiceMonitorStub(Collections.emptyMap(), tester.nodeRepository);
        tester.metric = new MetricsReporterTest.TestMetric();
        tester.failer = tester.createFailer();
        return tester;
    }

    /** Suspends the application in the orchestrator, rethrowing any failure unchecked. */
    public void suspend(ApplicationId app) {
        try {
            orchestrator.suspend(app);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public void allNodesMakeAConfigRequestExcept(Node ... deadNodeArray) {
        allNodesMakeAConfigRequestExcept(Arrays.asList(deadNodeArray));
    }

    /** Simulates a config request (liveness signal) from every node except the given "dead" ones. */
    public void allNodesMakeAConfigRequestExcept(List<Node> deadNodes) {
        for (Node node : nodeRepository.getNodes()) {
            if ( ! deadNodes.contains(node))
                hostLivenessTracker.receivedRequestFrom(node.hostname());
        }
    }

    public List<Node> createReadyNodes(int count) {
        return createReadyNodes(count, 0);
    }

    public List<Node> createReadyNodes(int count, NodeType nodeType) {
        return createReadyNodes(count, 0, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), nodeType);
    }

    public List<Node> createReadyNodes(int count, int startIndex) {
        return createReadyNodes(count, startIndex, "default");
    }

    public List<Node> createReadyNodes(int count, int startIndex, String flavor) {
        return createReadyNodes(count, startIndex, Optional.empty(), nodeFlavors.getFlavorOrThrow(flavor), NodeType.tenant);
    }

    /** Adds nodes "node<i>"/"host<i>" and walks them through provisioned -> dirty -> ready. */
    private List<Node> createReadyNodes(int count, int startIndex, Optional<String> parentHostname, Flavor flavor, NodeType nodeType) {
        List<Node> nodes = new ArrayList<>(count);
        for (int i = startIndex; i < startIndex + count; i++)
            nodes.add(nodeRepository.createNode("node" + i, "host" + i, parentHostname, flavor, nodeType));
        nodes = nodeRepository.addNodes(nodes);
        nodes = nodeRepository.setDirty(nodes);
        return nodeRepository.setReady(nodes);
    }

    /** Adds host-type nodes named "parent<i>" and walks them through to ready. */
    private List<Node> createHostNodes(int count) {
        List<Node> nodes = new ArrayList<>(count);
        for (int i = 0; i < count; i++)
            nodes.add(nodeRepository.createNode("parent" + i, "parent" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host));
        nodes = nodeRepository.addNodes(nodes);
        nodes = nodeRepository.setDirty(nodes);
        return nodeRepository.setReady(nodes);
    }

    private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodeCount) {
        activate(applicationId, cluster, Capacity.fromNodeCount(nodeCount));
    }

    /** Prepares and activates the application in a single committed transaction. */
    private void activate(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) {
        List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, capacity, 1, null);
        NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
        provisioner.activate(transaction, applicationId, hosts);
        transaction.commit();
    }

    /** Returns the node with the highest membership index from the given set of allocated nodes */
    public Node highestIndex(List<Node> nodes) {
        Node highestIndex = null;
        for (Node node : nodes) {
            if (highestIndex == null || node.allocation().get().membership().index() > highestIndex.allocation().get().membership().index())
                highestIndex = node;
        }
        return highestIndex;
    }

}
/**
 * Test fixture that wires a real NodeRepository together with mock/stub collaborators
 * (MockCurator, MockDeployer, ServiceMonitorStub, ManualClock, OrchestratorMock) for
 * exercising a NodeFailer. Use one of the static with*() factories to get a fully wired
 * instance; each factory activates applications and sanity-checks the resulting node counts.
 */
class NodeFailTester {

    // Shared application ids used across the factory setups.
    public static final ApplicationId nodeAdminApp = ApplicationId.from(TenantName.from("hosted-vespa"), ApplicationName.from("routing"), InstanceName.from("default"));
    public static final ApplicationId app1 = ApplicationId.from(TenantName.from("foo1"), ApplicationName.from("bar"), InstanceName.from("fuz"));
    public static final ApplicationId app2 = ApplicationId.from(TenantName.from("foo2"), ApplicationName.from("bar"), InstanceName.from("fuz"));
    public static final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default", "docker");
    private static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
    private static final Duration downtimeLimitOneHour = Duration.ofMinutes(60);

    // Public mutable fields: tests reach in directly to drive the clock, repository and stubs.
    public final ManualClock clock;
    public final NodeRepository nodeRepository;
    public NodeFailer failer;
    public ServiceMonitorStub serviceMonitor;
    public MockDeployer deployer;
    public MetricsReporterTest.TestMetric metric;
    private final TestHostLivenessTracker hostLivenessTracker;
    private final Orchestrator orchestrator;
    private final NodeRepositoryProvisioner provisioner;
    private final Curator curator;

    // Private: instances are only obtained through the with*() factories below.
    private NodeFailTester() {
        clock = new ManualClock();
        curator = new MockCurator();
        nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
                                            new DockerImage("docker-registry.domain.tld:8080/dist/vespa"));
        provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
        hostLivenessTracker = new TestHostLivenessTracker(clock);
        orchestrator = new OrchestratorMock();
    }

    /** Fixture with two activated tenant applications (5 and 7 nodes) on the "default" flavor. */
    public static NodeFailTester withTwoApplications() {
        NodeFailTester tester = new NodeFailTester();
        tester.createReadyNodes(16);
        tester.createHostNodes(3);
        ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"));
        ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"));
        int wantedNodesApp1 = 5;
        int wantedNodesApp2 = 7;
        tester.activate(app1, clusterApp1, wantedNodesApp1);
        tester.activate(app2, clusterApp2, wantedNodesApp2);
        // Sanity-check that activation gave each application exactly what it asked for.
        assertEquals(wantedNodesApp1, tester.nodeRepository.getNodes(app1, Node.State.active).size());
        assertEquals(wantedNodesApp2, tester.nodeRepository.getNodes(app2, Node.State.active).size());
        Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
        apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, Capacity.fromNodeCount(wantedNodesApp1, Optional.of("default")), 1));
        apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2, Capacity.fromNodeCount(wantedNodesApp2, Optional.of("default")), 1));
        tester.deployer = new MockDeployer(tester.provisioner, apps);
        tester.serviceMonitor = new ServiceMonitorStub(apps, tester.nodeRepository);
        tester.metric = new MetricsReporterTest.TestMetric();
        tester.failer = tester.createFailer();
        return tester;
    }

    /** Fixture with docker-flavored tenant nodes (3 per host) plus a node-admin app covering all hosts. */
    public static NodeFailTester withTwoApplicationsOnDocker(int numberOfHosts) {
        NodeFailTester tester = new NodeFailTester();
        int nodesPerHost = 3;
        List<Node> hosts = tester.createHostNodes(numberOfHosts);
        for (int i = 0; i < hosts.size(); i++) {
            // Child nodes are parented to "parent<i>" so they land on distinct hosts.
            tester.createReadyNodes(nodesPerHost, i * nodesPerHost, Optional.of("parent" + i),
                                    nodeFlavors.getFlavorOrThrow("docker"), NodeType.tenant);
        }
        ClusterSpec clusterNodeAdminApp = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"));
        ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.75.0"));
        ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.75.0"));
        Capacity allHosts = Capacity.fromRequiredNodeType(NodeType.host);
        Capacity capacity1 = Capacity.fromNodeCount(3, Optional.of("docker"));
        Capacity capacity2 = Capacity.fromNodeCount(5, Optional.of("docker"));
        tester.activate(nodeAdminApp, clusterNodeAdminApp, allHosts);
        tester.activate(app1, clusterApp1, capacity1);
        tester.activate(app2, clusterApp2, capacity2);
        // node-admin must own every host node; the tenant apps get their requested counts.
        assertEquals(new HashSet<>(tester.nodeRepository.getNodes(NodeType.host)),
                     new HashSet<>(tester.nodeRepository.getNodes(nodeAdminApp, Node.State.active)));
        assertEquals(capacity1.nodeCount(), tester.nodeRepository.getNodes(app1, Node.State.active).size());
        assertEquals(capacity2.nodeCount(), tester.nodeRepository.getNodes(app2, Node.State.active).size());
        Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
        apps.put(nodeAdminApp, new MockDeployer.ApplicationContext(nodeAdminApp, clusterNodeAdminApp, allHosts, 1));
        apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, capacity1, 1));
        apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2, capacity2, 1));
        tester.deployer = new MockDeployer(tester.provisioner, apps);
        tester.serviceMonitor = new ServiceMonitorStub(apps, tester.nodeRepository);
        tester.metric = new MetricsReporterTest.TestMetric();
        tester.failer = tester.createFailer();
        return tester;
    }

    /** Fixture where a single application owns all 16 proxy nodes. */
    public static NodeFailTester withProxyApplication() {
        NodeFailTester tester = new NodeFailTester();
        tester.createReadyNodes(16, NodeType.proxy);
        Capacity allProxies = Capacity.fromRequiredNodeType(NodeType.proxy);
        ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"));
        tester.activate(app1, clusterApp1, allProxies);
        assertEquals(16, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size());
        Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
        apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, allProxies, 1));
        tester.deployer = new MockDeployer(tester.provisioner, apps);
        tester.serviceMonitor = new ServiceMonitorStub(apps, tester.nodeRepository);
        tester.metric = new MetricsReporterTest.TestMetric();
        tester.failer = tester.createFailer();
        return tester;
    }

    /** Fixture with no nodes and no applications activated. */
    public static NodeFailTester withNoApplications() {
        NodeFailTester tester = new NodeFailTester();
        tester.deployer = new MockDeployer(tester.provisioner, Collections.emptyMap());
        tester.serviceMonitor = new ServiceMonitorStub(Collections.emptyMap(), tester.nodeRepository);
        tester.metric = new MetricsReporterTest.TestMetric();
        tester.failer = tester.createFailer();
        return tester;
    }

    /** Suspends the application in the orchestrator, rethrowing any failure unchecked. */
    public void suspend(ApplicationId app) {
        try {
            orchestrator.suspend(app);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public void allNodesMakeAConfigRequestExcept(Node ... deadNodeArray) {
        allNodesMakeAConfigRequestExcept(Arrays.asList(deadNodeArray));
    }

    /** Simulates a config request (liveness signal) from every node except the given "dead" ones. */
    public void allNodesMakeAConfigRequestExcept(List<Node> deadNodes) {
        for (Node node : nodeRepository.getNodes()) {
            if ( ! deadNodes.contains(node))
                hostLivenessTracker.receivedRequestFrom(node.hostname());
        }
    }

    public List<Node> createReadyNodes(int count) {
        return createReadyNodes(count, 0);
    }

    public List<Node> createReadyNodes(int count, NodeType nodeType) {
        return createReadyNodes(count, 0, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), nodeType);
    }

    public List<Node> createReadyNodes(int count, int startIndex) {
        return createReadyNodes(count, startIndex, "default");
    }

    public List<Node> createReadyNodes(int count, int startIndex, String flavor) {
        return createReadyNodes(count, startIndex, Optional.empty(), nodeFlavors.getFlavorOrThrow(flavor), NodeType.tenant);
    }

    /** Adds nodes "node<i>"/"host<i>" and walks them through provisioned -> dirty -> ready. */
    private List<Node> createReadyNodes(int count, int startIndex, Optional<String> parentHostname, Flavor flavor, NodeType nodeType) {
        List<Node> nodes = new ArrayList<>(count);
        for (int i = startIndex; i < startIndex + count; i++)
            nodes.add(nodeRepository.createNode("node" + i, "host" + i, parentHostname, flavor, nodeType));
        nodes = nodeRepository.addNodes(nodes);
        nodes = nodeRepository.setDirty(nodes);
        return nodeRepository.setReady(nodes);
    }

    /** Adds host-type nodes named "parent<i>" and walks them through to ready. */
    private List<Node> createHostNodes(int count) {
        List<Node> nodes = new ArrayList<>(count);
        for (int i = 0; i < count; i++)
            nodes.add(nodeRepository.createNode("parent" + i, "parent" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host));
        nodes = nodeRepository.addNodes(nodes);
        nodes = nodeRepository.setDirty(nodes);
        return nodeRepository.setReady(nodes);
    }

    private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodeCount) {
        activate(applicationId, cluster, Capacity.fromNodeCount(nodeCount));
    }

    /** Prepares and activates the application in a single committed transaction. */
    private void activate(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) {
        List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, capacity, 1, null);
        NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
        provisioner.activate(transaction, applicationId, hosts);
        transaction.commit();
    }

    /** Returns the node with the highest membership index from the given set of allocated nodes */
    public Node highestIndex(List<Node> nodes) {
        Node highestIndex = null;
        for (Node node : nodes) {
            if (highestIndex == null || node.allocation().get().membership().index() > highestIndex.allocation().get().membership().index())
                highestIndex = node;
        }
        return highestIndex;
    }

}
Don't you need ":port" as well when configserverConfig.zookeeperLocalhostAffinity() is true?
/**
 * Delegating constructor that honors zookeeperLocalhostAffinity: when affinity is enabled,
 * connect only to the ZooKeeper server on this host.
 *
 * Fix: the affinity branch previously passed HostName.getLocalhost() alone, i.e. a bare
 * hostname without the ":port" suffix that every connect string built by
 * createConnectionSpec(config) carries. It now resolves the local server's configured
 * host:port entry via createConnectionSpecForLocalhost(configserverConfig), as the
 * review comment requested.
 */
private Curator(ConfigserverConfig configserverConfig, String zooKeeperEnsembleConnectionSpec) {
    this(configserverConfig.zookeeperLocalhostAffinity() ?
                 createConnectionSpecForLocalhost(configserverConfig) : zooKeeperEnsembleConnectionSpec,
         zooKeeperEnsembleConnectionSpec);
}
HostName.getLocalhost() : zooKeeperEnsembleConnectionSpec,
/**
 * Delegates to the main constructor, choosing the connect string: the full ensemble spec by
 * default, or only this host's own host:port entry when localhost affinity is configured.
 */
private Curator(ConfigserverConfig configserverConfig, String zooKeeperEnsembleConnectionSpec) {
    // Branches swapped relative to a positive test: the common (no-affinity) case comes first.
    this( ! configserverConfig.zookeeperLocalhostAffinity()
                  ? zooKeeperEnsembleConnectionSpec
                  : createConnectionSpecForLocalhost(configserverConfig),
         zooKeeperEnsembleConnectionSpec);
}
/**
 * Wrapper around the Apache Curator framework: owns a started CuratorFramework, exposes
 * convenience CRUD operations on ZooKeeper paths (set/create/delete/getChildren/getData),
 * and factories for locks, counters, caches and completion waiters.
 */
class Curator implements AutoCloseable {

    // How long the DNS-fixing ZooKeeper factory tolerates an unresolvable host.
    private static final long UNKNOWN_HOST_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30);
    private static final int ZK_SESSION_TIMEOUT = 30000;     // ms
    private static final int ZK_CONNECTION_TIMEOUT = 30000;  // ms
    // Exponential backoff retry parameters shared by the framework and atomic counters.
    private static final int BASE_SLEEP_TIME = 1000;         // ms
    private static final int MAX_RETRIES = 10;

    protected final RetryPolicy retryPolicy;
    private final CuratorFramework curatorFramework;
    private final String connectionSpec;
    private final String zooKeeperEnsembleConnectionSpec;
    private final int zooKeeperEnsembleCount;

    /** Creates a curator instance from a comma-separated string of ZooKeeper host:port strings */
    public static Curator create(String connectionSpec) {
        return new Curator(connectionSpec, connectionSpec);
    }

    // The ZooKeeperServer argument is unused here; NOTE(review): presumably injected only to
    // order construction after the server is up — confirm against the component model.
    @Inject
    public Curator(ConfigserverConfig configserverConfig, ZooKeeperServer server) {
        this(configserverConfig, createConnectionSpec(configserverConfig));
    }

    // Builds the real CuratorFramework with session/connection timeouts and a ZooKeeper
    // factory that works around DNS resolution failures for unknown hosts.
    private Curator(String connectionSpec, String zooKeeperEnsembleConnectionSpec) {
        this(connectionSpec, zooKeeperEnsembleConnectionSpec, (retryPolicy) -> CuratorFrameworkFactory
                .builder()
                .retryPolicy(retryPolicy)
                .sessionTimeoutMs(ZK_SESSION_TIMEOUT)
                .connectionTimeoutMs(ZK_CONNECTION_TIMEOUT)
                .connectString(connectionSpec)
                .zookeeperFactory(new DNSResolvingFixerZooKeeperFactory(UNKNOWN_HOST_TIMEOUT_MILLIS))
                .build());
    }

    // Protected so tests can inject a custom framework factory (e.g. one returning a mock).
    protected Curator(String connectionSpec, String zooKeeperEnsembleConnectionSpec,
                      Function<RetryPolicy, CuratorFramework> curatorFactory) {
        this(connectionSpec, zooKeeperEnsembleConnectionSpec, curatorFactory,
             new ExponentialBackoffRetry(BASE_SLEEP_TIME, MAX_RETRIES));
    }

    // Final delegate: validates specs and starts the framework, unless the factory returned
    // null (which test factories may do — then no framework is started).
    private Curator(String connectionSpec, String zooKeeperEnsembleConnectionSpec,
                    Function<RetryPolicy, CuratorFramework> curatorFactory, RetryPolicy retryPolicy) {
        this.connectionSpec = connectionSpec;
        this.retryPolicy = retryPolicy;
        this.curatorFramework = curatorFactory.apply(retryPolicy);
        if (this.curatorFramework != null) {
            validateConnectionSpec(connectionSpec);
            validateConnectionSpec(zooKeeperEnsembleConnectionSpec);
            addFakeListener();
            curatorFramework.start();
        }
        this.zooKeeperEnsembleConnectionSpec = zooKeeperEnsembleConnectionSpec;
        // Ensemble size = number of comma-separated host:port entries.
        this.zooKeeperEnsembleCount = zooKeeperEnsembleConnectionSpec.split(",").length;
    }

    /** Builds a comma-separated "host:port,host:port,..." connect string from config. */
    static String createConnectionSpec(ConfigserverConfig config) {
        StringBuilder connectionSpec = new StringBuilder();
        for (int i = 0; i < config.zookeeperserver().size(); i++) {
            if (connectionSpec.length() > 0) {
                connectionSpec.append(',');
            }
            ConfigserverConfig.Zookeeperserver server = config.zookeeperserver(i);
            connectionSpec.append(server.hostname());
            connectionSpec.append(':');
            connectionSpec.append(server.port());
        }
        return connectionSpec.toString();
    }

    // Rejects null/empty connect strings before the framework is started.
    private static void validateConnectionSpec(String connectionSpec) {
        if (connectionSpec == null || connectionSpec.isEmpty())
            throw new IllegalArgumentException(String.format("Connections spec '%s' is not valid", connectionSpec));
    }

    /**
     * Returns the ZooKeeper "connect string" used by curator: a comma-separated list of
     * host:port of ZooKeeper endpoints to connect to. This may be a subset of
     * zooKeeperEnsembleConnectionSpec() if there's some affinity, e.g. for
     * performance reasons.
     *
     * This may be empty but never null
     */
    public String connectionSpec() { return connectionSpec; }

    /** For internal use; prefer creating a {@link CuratorCounter} */
    public DistributedAtomicLong createAtomicCounter(String path) {
        return new DistributedAtomicLong(curatorFramework, path, new ExponentialBackoffRetry(BASE_SLEEP_TIME, MAX_RETRIES));
    }

    /** For internal use; prefer creating a {@link com.yahoo.vespa.curator.recipes.CuratorLock} */
    public InterProcessLock createMutex(String lockPath) {
        return new InterProcessMutex(curatorFramework, lockPath);
    }

    // Registers a connection state listener whose stateChanged body is empty (a no-op).
    // NOTE(review): the reason is not evident from this file — confirm why it is needed.
    private void addFakeListener() {
        curatorFramework.getConnectionStateListenable().addListener(new ConnectionStateListener() {
            @Override
            public void stateChanged(CuratorFramework curatorFramework, ConnectionState connectionState) {
                // empty, not needed
            }
        });
    }

    /** Returns a waiter on an existing completion barrier at the given path. */
    public CompletionWaiter getCompletionWaiter(Path waiterPath, int numMembers, String id) {
        return CuratorCompletionWaiter.create(curatorFramework, waiterPath, numMembers, id);
    }

    /** Creates and initializes a completion barrier under the given parent path. */
    public CompletionWaiter createCompletionWaiter(Path parentPath, String waiterNode, int numMembers, String id) {
        return CuratorCompletionWaiter.createAndInitialize(this, parentPath, waiterNode, numMembers, id);
    }

    /** Creates a listenable cache which keeps in sync with changes to all the immediate children of a path */
    public DirectoryCache createDirectoryCache(String path, boolean cacheData, boolean dataIsCompressed, ExecutorService executorService) {
        return new PathChildrenCacheWrapper(framework(), path, cacheData, dataIsCompressed, executorService);
    }

    /** Creates a listenable cache which keeps in sync with changes to a given node */
    public FileCache createFileCache(String path, boolean dataIsCompressed) {
        return new NodeCacheWrapper(framework(), path, dataIsCompressed);
    }

    /** A convenience method which returns whether the given path exists */
    public boolean exists(Path path) {
        try {
            return framework().checkExists().forPath(path.getAbsolute()) != null;
        } catch (Exception e) {
            throw new RuntimeException("Could not check existence of " + path.getAbsolute(), e);
        }
    }

    /**
     * A convenience method which sets some content at a path.
     * If the path and any of its parents does not exists they are created.
     */
    public void set(Path path, byte[] data) {
        String absolutePath = path.getAbsolute();
        try {
            if ( ! exists(path))
                framework().create().creatingParentsIfNeeded().forPath(absolutePath, data);
            else
                framework().setData().forPath(absolutePath, data);
        } catch (Exception e) {
            throw new RuntimeException("Could not set data at " + absolutePath, e);
        }
    }

    /**
     * Creates an empty node at a path, creating any parents as necessary.
     * If the node already exists nothing is done.
     */
    public void create(Path path) {
        if (exists(path)) return;
        String absolutePath = path.getAbsolute();
        try {
            framework().create().creatingParentsIfNeeded().forPath(absolutePath, new byte[0]);
        } catch (org.apache.zookeeper.KeeperException.NodeExistsException e) {
            // Someone else created it between our exists() check and the create — fine.
        } catch (Exception e) {
            throw new RuntimeException("Could not create " + absolutePath, e);
        }
    }

    /**
     * Creates all the given paths in a single transaction. Any paths which already exists are ignored.
     */
    public void createAtomically(Path... paths) {
        try {
            CuratorTransaction transaction = framework().inTransaction();
            for (Path path : paths) {
                if ( ! exists(path)) {
                    transaction = transaction.create().forPath(path.getAbsolute(), new byte[0]).and();
                }
            }
            ((CuratorTransactionFinal)transaction).commit();
        } catch (Exception e) {
            throw new RuntimeException("Could not create " + Arrays.toString(paths), e);
        }
    }

    /**
     * Deletes the given path and any children it may have.
     * If the path does not exists nothing is done.
     */
    public void delete(Path path) {
        if ( ! exists(path)) return;
        try {
            framework().delete().guaranteed().deletingChildrenIfNeeded().forPath(path.getAbsolute());
        } catch (Exception e) {
            throw new RuntimeException("Could not delete " + path.getAbsolute(), e);
        }
    }

    /**
     * Returns the names of the children at the given path.
     * If the path does not exist or have no children an empty list (never null) is returned.
     */
    public List<String> getChildren(Path path) {
        if ( ! exists(path)) return Collections.emptyList();
        try {
            return framework().getChildren().forPath(path.getAbsolute());
        } catch (Exception e) {
            throw new RuntimeException("Could not get children of " + path.getAbsolute(), e);
        }
    }

    /**
     * Returns the data at the given path, which may be a zero-length buffer if the node exists but have no data.
     * Empty is returned if the path does not exist.
     */
    public Optional<byte[]> getData(Path path) {
        if ( ! exists(path)) return Optional.empty();
        try {
            return Optional.of(framework().getData().forPath(path.getAbsolute()));
        } catch (Exception e) {
            throw new RuntimeException("Could not get data at " + path.getAbsolute(), e);
        }
    }

    /** Returns the curator framework API */
    public CuratorFramework framework() { return curatorFramework; }

    @Override
    public void close() {
        curatorFramework.close();
    }

    /**
     * Interface for waiting for completion of an operation
     */
    public interface CompletionWaiter {

        /**
         * Awaits completion of something. Blocks until an implementation defined
         * condition has been met.
         *
         * @param timeout timeout for blocking await call.
         * @throws CompletionTimeoutException if timeout is reached without completion.
         */
        void awaitCompletion(Duration timeout);

        /**
         * Notify completion of something. This method does not block and is called by clients
         * that want to notify the completion waiter that something has completed.
         */
        void notifyCompletion();

    }

    /**
     * A listenable cache of all the immediate children of a curator path.
     * This wraps the Curator PathChildrenCache recipe to allow us to mock it.
     */
    public interface DirectoryCache {

        void start();

        void addListener(PathChildrenCacheListener listener);

        List<ChildData> getCurrentData();

        void close();

    }

    /**
     * A listenable cache of the content of a single curator path.
     * This wraps the Curator NodeCache recipe to allow us to mock it.
     */
    public interface FileCache {

        void start();

        void addListener(NodeCacheListener listener);

        ChildData getCurrentData();

        void close();

    }

    /**
     * @return The non-null connect string containing all ZooKeeper servers in the ensemble.
     * WARNING: This may be different from the servers this Curator may connect to.
     * TODO: Move method out of this class.
     */
    public String zooKeeperEnsembleConnectionSpec() {
        return zooKeeperEnsembleConnectionSpec;
    }

    /**
     * Returns the number of zooKeeper servers in this ensemble.
     * WARNING: This may be different from the number of servers this Curator may connect to.
     * TODO: Move method out of this class.
     */
    public int zooKeeperEnsembleCount() { return zooKeeperEnsembleCount; }

}
class Curator implements AutoCloseable { private static final long UNKNOWN_HOST_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30); private static final int ZK_SESSION_TIMEOUT = 30000; private static final int ZK_CONNECTION_TIMEOUT = 30000; private static final int BASE_SLEEP_TIME = 1000; private static final int MAX_RETRIES = 10; protected final RetryPolicy retryPolicy; private final CuratorFramework curatorFramework; private final String connectionSpec; private final String zooKeeperEnsembleConnectionSpec; private final int zooKeeperEnsembleCount; /** Creates a curator instance from a comma-separated string of ZooKeeper host:port strings */ public static Curator create(String connectionSpec) { return new Curator(connectionSpec, connectionSpec); } @Inject public Curator(ConfigserverConfig configserverConfig, ZooKeeperServer server) { this(configserverConfig, createConnectionSpec(configserverConfig)); } private Curator(String connectionSpec, String zooKeeperEnsembleConnectionSpec) { this(connectionSpec, zooKeeperEnsembleConnectionSpec, (retryPolicy) -> CuratorFrameworkFactory .builder() .retryPolicy(retryPolicy) .sessionTimeoutMs(ZK_SESSION_TIMEOUT) .connectionTimeoutMs(ZK_CONNECTION_TIMEOUT) .connectString(connectionSpec) .zookeeperFactory(new DNSResolvingFixerZooKeeperFactory(UNKNOWN_HOST_TIMEOUT_MILLIS)) .build()); } protected Curator(String connectionSpec, String zooKeeperEnsembleConnectionSpec, Function<RetryPolicy, CuratorFramework> curatorFactory) { this(connectionSpec, zooKeeperEnsembleConnectionSpec, curatorFactory, new ExponentialBackoffRetry(BASE_SLEEP_TIME, MAX_RETRIES)); } private Curator(String connectionSpec, String zooKeeperEnsembleConnectionSpec, Function<RetryPolicy, CuratorFramework> curatorFactory, RetryPolicy retryPolicy) { this.connectionSpec = connectionSpec; this.retryPolicy = retryPolicy; this.curatorFramework = curatorFactory.apply(retryPolicy); if (this.curatorFramework != null) { validateConnectionSpec(connectionSpec); 
validateConnectionSpec(zooKeeperEnsembleConnectionSpec); addFakeListener(); curatorFramework.start(); } this.zooKeeperEnsembleConnectionSpec = zooKeeperEnsembleConnectionSpec; this.zooKeeperEnsembleCount = zooKeeperEnsembleConnectionSpec.split(",").length; } static String createConnectionSpec(ConfigserverConfig config) { StringBuilder connectionSpec = new StringBuilder(); for (int i = 0; i < config.zookeeperserver().size(); i++) { if (connectionSpec.length() > 0) { connectionSpec.append(','); } ConfigserverConfig.Zookeeperserver server = config.zookeeperserver(i); connectionSpec.append(server.hostname()); connectionSpec.append(':'); connectionSpec.append(server.port()); } return connectionSpec.toString(); } static String createConnectionSpecForLocalhost(ConfigserverConfig config) { String thisServer = HostName.getLocalhost(); for (int i = 0; i < config.zookeeperserver().size(); i++) { ConfigserverConfig.Zookeeperserver server = config.zookeeperserver(i); if (thisServer.equals(server.hostname())) { return String.format("%s:%d", server.hostname(), server.port()); } } throw new IllegalArgumentException("Unable to create connect string to localhost: " + "There is no localhost servers specified in config: " + config); } private static void validateConnectionSpec(String connectionSpec) { if (connectionSpec == null || connectionSpec.isEmpty()) throw new IllegalArgumentException(String.format("Connections spec '%s' is not valid", connectionSpec)); } /** * Returns the ZooKeeper "connect string" used by curator: a comma-separated list of * host:port of ZooKeeper endpoints to connect to. This may be a subset of * zooKeeperEnsembleConnectionSpec() if there's some affinity, e.g. for * performance reasons. 
* * This may be empty but never null */ public String connectionSpec() { return connectionSpec; } /** For internal use; prefer creating a {@link CuratorCounter} */ public DistributedAtomicLong createAtomicCounter(String path) { return new DistributedAtomicLong(curatorFramework, path, new ExponentialBackoffRetry(BASE_SLEEP_TIME, MAX_RETRIES)); } /** For internal use; prefer creating a {@link com.yahoo.vespa.curator.recipes.CuratorLock} */ public InterProcessLock createMutex(String lockPath) { return new InterProcessMutex(curatorFramework, lockPath); } private void addFakeListener() { curatorFramework.getConnectionStateListenable().addListener(new ConnectionStateListener() { @Override public void stateChanged(CuratorFramework curatorFramework, ConnectionState connectionState) { } }); } public CompletionWaiter getCompletionWaiter(Path waiterPath, int numMembers, String id) { return CuratorCompletionWaiter.create(curatorFramework, waiterPath, numMembers, id); } public CompletionWaiter createCompletionWaiter(Path parentPath, String waiterNode, int numMembers, String id) { return CuratorCompletionWaiter.createAndInitialize(this, parentPath, waiterNode, numMembers, id); } /** Creates a listenable cache which keeps in sync with changes to all the immediate children of a path */ public DirectoryCache createDirectoryCache(String path, boolean cacheData, boolean dataIsCompressed, ExecutorService executorService) { return new PathChildrenCacheWrapper(framework(), path, cacheData, dataIsCompressed, executorService); } /** Creates a listenable cache which keeps in sync with changes to a given node */ public FileCache createFileCache(String path, boolean dataIsCompressed) { return new NodeCacheWrapper(framework(), path, dataIsCompressed); } /** A convenience method which returns whether the given path exists */ public boolean exists(Path path) { try { return framework().checkExists().forPath(path.getAbsolute()) != null; } catch (Exception e) { throw new RuntimeException("Could 
not check existence of " + path.getAbsolute(), e); } } /** * A convenience method which sets some content at a path. * If the path and any of its parents does not exists they are created. */ public void set(Path path, byte[] data) { String absolutePath = path.getAbsolute(); try { if ( ! exists(path)) framework().create().creatingParentsIfNeeded().forPath(absolutePath, data); else framework().setData().forPath(absolutePath, data); } catch (Exception e) { throw new RuntimeException("Could not set data at " + absolutePath, e); } } /** * Creates an empty node at a path, creating any parents as necessary. * If the node already exists nothing is done. */ public void create(Path path) { if (exists(path)) return; String absolutePath = path.getAbsolute(); try { framework().create().creatingParentsIfNeeded().forPath(absolutePath, new byte[0]); } catch (org.apache.zookeeper.KeeperException.NodeExistsException e) { } catch (Exception e) { throw new RuntimeException("Could not create " + absolutePath, e); } } /** * Creates all the given paths in a single transaction. Any paths which already exists are ignored. */ public void createAtomically(Path... paths) { try { CuratorTransaction transaction = framework().inTransaction(); for (Path path : paths) { if ( ! exists(path)) { transaction = transaction.create().forPath(path.getAbsolute(), new byte[0]).and(); } } ((CuratorTransactionFinal)transaction).commit(); } catch (Exception e) { throw new RuntimeException("Could not create " + Arrays.toString(paths), e); } } /** * Deletes the given path and any children it may have. * If the path does not exists nothing is done. */ public void delete(Path path) { if ( ! exists(path)) return; try { framework().delete().guaranteed().deletingChildrenIfNeeded().forPath(path.getAbsolute()); } catch (Exception e) { throw new RuntimeException("Could not delete " + path.getAbsolute(), e); } } /** * Returns the names of the children at the given path. 
* If the path does not exist or have no children an empty list (never null) is returned. */ public List<String> getChildren(Path path) { if ( ! exists(path)) return Collections.emptyList(); try { return framework().getChildren().forPath(path.getAbsolute()); } catch (Exception e) { throw new RuntimeException("Could not get children of " + path.getAbsolute(), e); } } /** * Returns the data at the given path, which may be a zero-length buffer if the node exists but have no data. * Empty is returned if the path does not exist. */ public Optional<byte[]> getData(Path path) { if ( ! exists(path)) return Optional.empty(); try { return Optional.of(framework().getData().forPath(path.getAbsolute())); } catch (Exception e) { throw new RuntimeException("Could not get data at " + path.getAbsolute(), e); } } /** Returns the curator framework API */ public CuratorFramework framework() { return curatorFramework; } @Override public void close() { curatorFramework.close(); } /** * Interface for waiting for completion of an operation */ public interface CompletionWaiter { /** * Awaits completion of something. Blocks until an implementation defined * condition has been met. * * @param timeout timeout for blocking await call. * @throws CompletionTimeoutException if timeout is reached without completion. */ void awaitCompletion(Duration timeout); /** * Notify completion of something. This method does not block and is called by clients * that want to notify the completion waiter that something has completed. */ void notifyCompletion(); } /** * A listenable cache of all the immediate children of a curator path. * This wraps the Curator PathChildrenCache recipe to allow us to mock it. */ public interface DirectoryCache { void start(); void addListener(PathChildrenCacheListener listener); List<ChildData> getCurrentData(); void close(); } /** * A listenable cache of the content of a single curator path. * This wraps the Curator NodeCache recipe to allow us to mock it. 
*/ public interface FileCache { void start(); void addListener(NodeCacheListener listener); ChildData getCurrentData(); void close(); } /** * @return The non-null connect string containing all ZooKeeper servers in the ensemble. * WARNING: This may be different from the servers this Curator may connect to. * TODO: Move method out of this class. */ public String zooKeeperEnsembleConnectionSpec() { return zooKeeperEnsembleConnectionSpec; } /** * Returns the number of zooKeeper servers in this ensemble. * WARNING: This may be different from the number of servers this Curator may connect to. * TODO: Move method out of this class. */ public int zooKeeperEnsembleCount() { return zooKeeperEnsembleCount; } }
Yes! Good catch, fix coming up.
private Curator(ConfigserverConfig configserverConfig, String zooKeeperEnsembleConnectionSpec) { this(configserverConfig.zookeeperLocalhostAffinity() ? HostName.getLocalhost() : zooKeeperEnsembleConnectionSpec, zooKeeperEnsembleConnectionSpec); }
HostName.getLocalhost() : zooKeeperEnsembleConnectionSpec,
private Curator(ConfigserverConfig configserverConfig, String zooKeeperEnsembleConnectionSpec) { this(configserverConfig.zookeeperLocalhostAffinity() ? createConnectionSpecForLocalhost(configserverConfig) : zooKeeperEnsembleConnectionSpec, zooKeeperEnsembleConnectionSpec); }
class Curator implements AutoCloseable { private static final long UNKNOWN_HOST_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30); private static final int ZK_SESSION_TIMEOUT = 30000; private static final int ZK_CONNECTION_TIMEOUT = 30000; private static final int BASE_SLEEP_TIME = 1000; private static final int MAX_RETRIES = 10; protected final RetryPolicy retryPolicy; private final CuratorFramework curatorFramework; private final String connectionSpec; private final String zooKeeperEnsembleConnectionSpec; private final int zooKeeperEnsembleCount; /** Creates a curator instance from a comma-separated string of ZooKeeper host:port strings */ public static Curator create(String connectionSpec) { return new Curator(connectionSpec, connectionSpec); } @Inject public Curator(ConfigserverConfig configserverConfig, ZooKeeperServer server) { this(configserverConfig, createConnectionSpec(configserverConfig)); } private Curator(String connectionSpec, String zooKeeperEnsembleConnectionSpec) { this(connectionSpec, zooKeeperEnsembleConnectionSpec, (retryPolicy) -> CuratorFrameworkFactory .builder() .retryPolicy(retryPolicy) .sessionTimeoutMs(ZK_SESSION_TIMEOUT) .connectionTimeoutMs(ZK_CONNECTION_TIMEOUT) .connectString(connectionSpec) .zookeeperFactory(new DNSResolvingFixerZooKeeperFactory(UNKNOWN_HOST_TIMEOUT_MILLIS)) .build()); } protected Curator(String connectionSpec, String zooKeeperEnsembleConnectionSpec, Function<RetryPolicy, CuratorFramework> curatorFactory) { this(connectionSpec, zooKeeperEnsembleConnectionSpec, curatorFactory, new ExponentialBackoffRetry(BASE_SLEEP_TIME, MAX_RETRIES)); } private Curator(String connectionSpec, String zooKeeperEnsembleConnectionSpec, Function<RetryPolicy, CuratorFramework> curatorFactory, RetryPolicy retryPolicy) { this.connectionSpec = connectionSpec; this.retryPolicy = retryPolicy; this.curatorFramework = curatorFactory.apply(retryPolicy); if (this.curatorFramework != null) { validateConnectionSpec(connectionSpec); 
validateConnectionSpec(zooKeeperEnsembleConnectionSpec); addFakeListener(); curatorFramework.start(); } this.zooKeeperEnsembleConnectionSpec = zooKeeperEnsembleConnectionSpec; this.zooKeeperEnsembleCount = zooKeeperEnsembleConnectionSpec.split(",").length; } static String createConnectionSpec(ConfigserverConfig config) { StringBuilder connectionSpec = new StringBuilder(); for (int i = 0; i < config.zookeeperserver().size(); i++) { if (connectionSpec.length() > 0) { connectionSpec.append(','); } ConfigserverConfig.Zookeeperserver server = config.zookeeperserver(i); connectionSpec.append(server.hostname()); connectionSpec.append(':'); connectionSpec.append(server.port()); } return connectionSpec.toString(); } private static void validateConnectionSpec(String connectionSpec) { if (connectionSpec == null || connectionSpec.isEmpty()) throw new IllegalArgumentException(String.format("Connections spec '%s' is not valid", connectionSpec)); } /** * Returns the ZooKeeper "connect string" used by curator: a comma-separated list of * host:port of ZooKeeper endpoints to connect to. This may be a subset of * zooKeeperEnsembleConnectionSpec() if there's some affinity, e.g. for * performance reasons. 
* * This may be empty but never null */ public String connectionSpec() { return connectionSpec; } /** For internal use; prefer creating a {@link CuratorCounter} */ public DistributedAtomicLong createAtomicCounter(String path) { return new DistributedAtomicLong(curatorFramework, path, new ExponentialBackoffRetry(BASE_SLEEP_TIME, MAX_RETRIES)); } /** For internal use; prefer creating a {@link com.yahoo.vespa.curator.recipes.CuratorLock} */ public InterProcessLock createMutex(String lockPath) { return new InterProcessMutex(curatorFramework, lockPath); } private void addFakeListener() { curatorFramework.getConnectionStateListenable().addListener(new ConnectionStateListener() { @Override public void stateChanged(CuratorFramework curatorFramework, ConnectionState connectionState) { } }); } public CompletionWaiter getCompletionWaiter(Path waiterPath, int numMembers, String id) { return CuratorCompletionWaiter.create(curatorFramework, waiterPath, numMembers, id); } public CompletionWaiter createCompletionWaiter(Path parentPath, String waiterNode, int numMembers, String id) { return CuratorCompletionWaiter.createAndInitialize(this, parentPath, waiterNode, numMembers, id); } /** Creates a listenable cache which keeps in sync with changes to all the immediate children of a path */ public DirectoryCache createDirectoryCache(String path, boolean cacheData, boolean dataIsCompressed, ExecutorService executorService) { return new PathChildrenCacheWrapper(framework(), path, cacheData, dataIsCompressed, executorService); } /** Creates a listenable cache which keeps in sync with changes to a given node */ public FileCache createFileCache(String path, boolean dataIsCompressed) { return new NodeCacheWrapper(framework(), path, dataIsCompressed); } /** A convenience method which returns whether the given path exists */ public boolean exists(Path path) { try { return framework().checkExists().forPath(path.getAbsolute()) != null; } catch (Exception e) { throw new RuntimeException("Could 
not check existence of " + path.getAbsolute(), e); } } /** * A convenience method which sets some content at a path. * If the path and any of its parents does not exists they are created. */ public void set(Path path, byte[] data) { String absolutePath = path.getAbsolute(); try { if ( ! exists(path)) framework().create().creatingParentsIfNeeded().forPath(absolutePath, data); else framework().setData().forPath(absolutePath, data); } catch (Exception e) { throw new RuntimeException("Could not set data at " + absolutePath, e); } } /** * Creates an empty node at a path, creating any parents as necessary. * If the node already exists nothing is done. */ public void create(Path path) { if (exists(path)) return; String absolutePath = path.getAbsolute(); try { framework().create().creatingParentsIfNeeded().forPath(absolutePath, new byte[0]); } catch (org.apache.zookeeper.KeeperException.NodeExistsException e) { } catch (Exception e) { throw new RuntimeException("Could not create " + absolutePath, e); } } /** * Creates all the given paths in a single transaction. Any paths which already exists are ignored. */ public void createAtomically(Path... paths) { try { CuratorTransaction transaction = framework().inTransaction(); for (Path path : paths) { if ( ! exists(path)) { transaction = transaction.create().forPath(path.getAbsolute(), new byte[0]).and(); } } ((CuratorTransactionFinal)transaction).commit(); } catch (Exception e) { throw new RuntimeException("Could not create " + Arrays.toString(paths), e); } } /** * Deletes the given path and any children it may have. * If the path does not exists nothing is done. */ public void delete(Path path) { if ( ! exists(path)) return; try { framework().delete().guaranteed().deletingChildrenIfNeeded().forPath(path.getAbsolute()); } catch (Exception e) { throw new RuntimeException("Could not delete " + path.getAbsolute(), e); } } /** * Returns the names of the children at the given path. 
* If the path does not exist or have no children an empty list (never null) is returned. */ public List<String> getChildren(Path path) { if ( ! exists(path)) return Collections.emptyList(); try { return framework().getChildren().forPath(path.getAbsolute()); } catch (Exception e) { throw new RuntimeException("Could not get children of " + path.getAbsolute(), e); } } /** * Returns the data at the given path, which may be a zero-length buffer if the node exists but have no data. * Empty is returned if the path does not exist. */ public Optional<byte[]> getData(Path path) { if ( ! exists(path)) return Optional.empty(); try { return Optional.of(framework().getData().forPath(path.getAbsolute())); } catch (Exception e) { throw new RuntimeException("Could not get data at " + path.getAbsolute(), e); } } /** Returns the curator framework API */ public CuratorFramework framework() { return curatorFramework; } @Override public void close() { curatorFramework.close(); } /** * Interface for waiting for completion of an operation */ public interface CompletionWaiter { /** * Awaits completion of something. Blocks until an implementation defined * condition has been met. * * @param timeout timeout for blocking await call. * @throws CompletionTimeoutException if timeout is reached without completion. */ void awaitCompletion(Duration timeout); /** * Notify completion of something. This method does not block and is called by clients * that want to notify the completion waiter that something has completed. */ void notifyCompletion(); } /** * A listenable cache of all the immediate children of a curator path. * This wraps the Curator PathChildrenCache recipe to allow us to mock it. */ public interface DirectoryCache { void start(); void addListener(PathChildrenCacheListener listener); List<ChildData> getCurrentData(); void close(); } /** * A listenable cache of the content of a single curator path. * This wraps the Curator NodeCache recipe to allow us to mock it. 
*/ public interface FileCache { void start(); void addListener(NodeCacheListener listener); ChildData getCurrentData(); void close(); } /** * @return The non-null connect string containing all ZooKeeper servers in the ensemble. * WARNING: This may be different from the servers this Curator may connect to. * TODO: Move method out of this class. */ public String zooKeeperEnsembleConnectionSpec() { return zooKeeperEnsembleConnectionSpec; } /** * Returns the number of zooKeeper servers in this ensemble. * WARNING: This may be different from the number of servers this Curator may connect to. * TODO: Move method out of this class. */ public int zooKeeperEnsembleCount() { return zooKeeperEnsembleCount; } }
class Curator implements AutoCloseable { private static final long UNKNOWN_HOST_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30); private static final int ZK_SESSION_TIMEOUT = 30000; private static final int ZK_CONNECTION_TIMEOUT = 30000; private static final int BASE_SLEEP_TIME = 1000; private static final int MAX_RETRIES = 10; protected final RetryPolicy retryPolicy; private final CuratorFramework curatorFramework; private final String connectionSpec; private final String zooKeeperEnsembleConnectionSpec; private final int zooKeeperEnsembleCount; /** Creates a curator instance from a comma-separated string of ZooKeeper host:port strings */ public static Curator create(String connectionSpec) { return new Curator(connectionSpec, connectionSpec); } @Inject public Curator(ConfigserverConfig configserverConfig, ZooKeeperServer server) { this(configserverConfig, createConnectionSpec(configserverConfig)); } private Curator(String connectionSpec, String zooKeeperEnsembleConnectionSpec) { this(connectionSpec, zooKeeperEnsembleConnectionSpec, (retryPolicy) -> CuratorFrameworkFactory .builder() .retryPolicy(retryPolicy) .sessionTimeoutMs(ZK_SESSION_TIMEOUT) .connectionTimeoutMs(ZK_CONNECTION_TIMEOUT) .connectString(connectionSpec) .zookeeperFactory(new DNSResolvingFixerZooKeeperFactory(UNKNOWN_HOST_TIMEOUT_MILLIS)) .build()); } protected Curator(String connectionSpec, String zooKeeperEnsembleConnectionSpec, Function<RetryPolicy, CuratorFramework> curatorFactory) { this(connectionSpec, zooKeeperEnsembleConnectionSpec, curatorFactory, new ExponentialBackoffRetry(BASE_SLEEP_TIME, MAX_RETRIES)); } private Curator(String connectionSpec, String zooKeeperEnsembleConnectionSpec, Function<RetryPolicy, CuratorFramework> curatorFactory, RetryPolicy retryPolicy) { this.connectionSpec = connectionSpec; this.retryPolicy = retryPolicy; this.curatorFramework = curatorFactory.apply(retryPolicy); if (this.curatorFramework != null) { validateConnectionSpec(connectionSpec); 
validateConnectionSpec(zooKeeperEnsembleConnectionSpec); addFakeListener(); curatorFramework.start(); } this.zooKeeperEnsembleConnectionSpec = zooKeeperEnsembleConnectionSpec; this.zooKeeperEnsembleCount = zooKeeperEnsembleConnectionSpec.split(",").length; } static String createConnectionSpec(ConfigserverConfig config) { StringBuilder connectionSpec = new StringBuilder(); for (int i = 0; i < config.zookeeperserver().size(); i++) { if (connectionSpec.length() > 0) { connectionSpec.append(','); } ConfigserverConfig.Zookeeperserver server = config.zookeeperserver(i); connectionSpec.append(server.hostname()); connectionSpec.append(':'); connectionSpec.append(server.port()); } return connectionSpec.toString(); } static String createConnectionSpecForLocalhost(ConfigserverConfig config) { String thisServer = HostName.getLocalhost(); for (int i = 0; i < config.zookeeperserver().size(); i++) { ConfigserverConfig.Zookeeperserver server = config.zookeeperserver(i); if (thisServer.equals(server.hostname())) { return String.format("%s:%d", server.hostname(), server.port()); } } throw new IllegalArgumentException("Unable to create connect string to localhost: " + "There is no localhost servers specified in config: " + config); } private static void validateConnectionSpec(String connectionSpec) { if (connectionSpec == null || connectionSpec.isEmpty()) throw new IllegalArgumentException(String.format("Connections spec '%s' is not valid", connectionSpec)); } /** * Returns the ZooKeeper "connect string" used by curator: a comma-separated list of * host:port of ZooKeeper endpoints to connect to. This may be a subset of * zooKeeperEnsembleConnectionSpec() if there's some affinity, e.g. for * performance reasons. 
* * This may be empty but never null */ public String connectionSpec() { return connectionSpec; } /** For internal use; prefer creating a {@link CuratorCounter} */ public DistributedAtomicLong createAtomicCounter(String path) { return new DistributedAtomicLong(curatorFramework, path, new ExponentialBackoffRetry(BASE_SLEEP_TIME, MAX_RETRIES)); } /** For internal use; prefer creating a {@link com.yahoo.vespa.curator.recipes.CuratorLock} */ public InterProcessLock createMutex(String lockPath) { return new InterProcessMutex(curatorFramework, lockPath); } private void addFakeListener() { curatorFramework.getConnectionStateListenable().addListener(new ConnectionStateListener() { @Override public void stateChanged(CuratorFramework curatorFramework, ConnectionState connectionState) { } }); } public CompletionWaiter getCompletionWaiter(Path waiterPath, int numMembers, String id) { return CuratorCompletionWaiter.create(curatorFramework, waiterPath, numMembers, id); } public CompletionWaiter createCompletionWaiter(Path parentPath, String waiterNode, int numMembers, String id) { return CuratorCompletionWaiter.createAndInitialize(this, parentPath, waiterNode, numMembers, id); } /** Creates a listenable cache which keeps in sync with changes to all the immediate children of a path */ public DirectoryCache createDirectoryCache(String path, boolean cacheData, boolean dataIsCompressed, ExecutorService executorService) { return new PathChildrenCacheWrapper(framework(), path, cacheData, dataIsCompressed, executorService); } /** Creates a listenable cache which keeps in sync with changes to a given node */ public FileCache createFileCache(String path, boolean dataIsCompressed) { return new NodeCacheWrapper(framework(), path, dataIsCompressed); } /** A convenience method which returns whether the given path exists */ public boolean exists(Path path) { try { return framework().checkExists().forPath(path.getAbsolute()) != null; } catch (Exception e) { throw new RuntimeException("Could 
not check existence of " + path.getAbsolute(), e); } } /** * A convenience method which sets some content at a path. * If the path and any of its parents does not exists they are created. */ public void set(Path path, byte[] data) { String absolutePath = path.getAbsolute(); try { if ( ! exists(path)) framework().create().creatingParentsIfNeeded().forPath(absolutePath, data); else framework().setData().forPath(absolutePath, data); } catch (Exception e) { throw new RuntimeException("Could not set data at " + absolutePath, e); } } /** * Creates an empty node at a path, creating any parents as necessary. * If the node already exists nothing is done. */ public void create(Path path) { if (exists(path)) return; String absolutePath = path.getAbsolute(); try { framework().create().creatingParentsIfNeeded().forPath(absolutePath, new byte[0]); } catch (org.apache.zookeeper.KeeperException.NodeExistsException e) { } catch (Exception e) { throw new RuntimeException("Could not create " + absolutePath, e); } } /** * Creates all the given paths in a single transaction. Any paths which already exists are ignored. */ public void createAtomically(Path... paths) { try { CuratorTransaction transaction = framework().inTransaction(); for (Path path : paths) { if ( ! exists(path)) { transaction = transaction.create().forPath(path.getAbsolute(), new byte[0]).and(); } } ((CuratorTransactionFinal)transaction).commit(); } catch (Exception e) { throw new RuntimeException("Could not create " + Arrays.toString(paths), e); } } /** * Deletes the given path and any children it may have. * If the path does not exists nothing is done. */ public void delete(Path path) { if ( ! exists(path)) return; try { framework().delete().guaranteed().deletingChildrenIfNeeded().forPath(path.getAbsolute()); } catch (Exception e) { throw new RuntimeException("Could not delete " + path.getAbsolute(), e); } } /** * Returns the names of the children at the given path. 
* If the path does not exist or have no children an empty list (never null) is returned. */ public List<String> getChildren(Path path) { if ( ! exists(path)) return Collections.emptyList(); try { return framework().getChildren().forPath(path.getAbsolute()); } catch (Exception e) { throw new RuntimeException("Could not get children of " + path.getAbsolute(), e); } } /** * Returns the data at the given path, which may be a zero-length buffer if the node exists but have no data. * Empty is returned if the path does not exist. */ public Optional<byte[]> getData(Path path) { if ( ! exists(path)) return Optional.empty(); try { return Optional.of(framework().getData().forPath(path.getAbsolute())); } catch (Exception e) { throw new RuntimeException("Could not get data at " + path.getAbsolute(), e); } } /** Returns the curator framework API */ public CuratorFramework framework() { return curatorFramework; } @Override public void close() { curatorFramework.close(); } /** * Interface for waiting for completion of an operation */ public interface CompletionWaiter { /** * Awaits completion of something. Blocks until an implementation defined * condition has been met. * * @param timeout timeout for blocking await call. * @throws CompletionTimeoutException if timeout is reached without completion. */ void awaitCompletion(Duration timeout); /** * Notify completion of something. This method does not block and is called by clients * that want to notify the completion waiter that something has completed. */ void notifyCompletion(); } /** * A listenable cache of all the immediate children of a curator path. * This wraps the Curator PathChildrenCache recipe to allow us to mock it. */ public interface DirectoryCache { void start(); void addListener(PathChildrenCacheListener listener); List<ChildData> getCurrentData(); void close(); } /** * A listenable cache of the content of a single curator path. * This wraps the Curator NodeCache recipe to allow us to mock it. 
*/ public interface FileCache { void start(); void addListener(NodeCacheListener listener); ChildData getCurrentData(); void close(); } /** * @return The non-null connect string containing all ZooKeeper servers in the ensemble. * WARNING: This may be different from the servers this Curator may connect to. * TODO: Move method out of this class. */ public String zooKeeperEnsembleConnectionSpec() { return zooKeeperEnsembleConnectionSpec; } /** * Returns the number of zooKeeper servers in this ensemble. * WARNING: This may be different from the number of servers this Curator may connect to. * TODO: Move method out of this class. */ public int zooKeeperEnsembleCount() { return zooKeeperEnsembleCount; } }
Isn't this a warning?
/**
 * Stops (if running) and deletes the given container, and — when the docker network is NATed —
 * tears down the iptables NAT rules that map the host's public address to the container.
 * Failure to remove a NAT rule is logged but does not abort the removal.
 */
public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) {
    final ContainerName containerName = existingContainer.name;
    PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);

    // A running container must be stopped before it can be deleted.
    if (existingContainer.state.isRunning()) {
        logger.info("Stopping container " + containerName.asString());
        docker.stopContainer(containerName);
    }

    logger.info("Deleting container " + containerName.asString());
    docker.deleteContainer(containerName);

    if (docker.networkNATed()) {
        logger.info("Delete iptables NAT rules for " + containerName.asString());
        try {
            InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname);
            String ipv6Str = docker.getGlobalIPv6Address(containerName);
            String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str));
            Pair<Integer, String> result = processExecuter.exec(drop);
            if (result.getFirst() != 0) {
                // BUG FIX: was logger.debug — a leftover NAT rule is an error condition,
                // log at warning, consistent with the IOException branch below.
                logger.warning("Unable to drop NAT rule - error message: " + result.getSecond());
            }
        } catch (IOException e) {
            logger.warning("Unable to drop NAT rule for container " + containerName, e);
        }
    }
}
logger.debug("Unable to drop NAT rule - error message: " + result.getSecond());
/**
 * Stops (if running) and deletes the given container, and — when the docker network is NATed —
 * tears down the iptables NAT rules that map the host's public address to the container.
 * Failure to remove a NAT rule is logged but does not abort the removal.
 */
public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) {
    final ContainerName containerName = existingContainer.name;
    PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);

    // A running container must be stopped before it can be deleted.
    if (existingContainer.state.isRunning()) {
        logger.info("Stopping container " + containerName.asString());
        docker.stopContainer(containerName);
    }

    logger.info("Deleting container " + containerName.asString());
    docker.deleteContainer(containerName);

    if (docker.networkNATed()) {
        logger.info("Delete iptables NAT rules for " + containerName.asString());
        try {
            InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname);
            String ipv6Str = docker.getGlobalIPv6Address(containerName);
            String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str));
            Pair<Integer, String> result = processExecuter.exec(drop);
            if (result.getFirst() != 0) {
                // BUG FIX: was logger.debug — a leftover NAT rule is an error condition,
                // log at warning, consistent with the IOException branch below.
                logger.warning("Unable to drop NAT rule - error message: " + result.getSecond());
            }
        } catch (IOException e) {
            logger.warning("Unable to drop NAT rule for container " + containerName, e);
        }
    }
}
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpe_policy_updater"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) 
.collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNATed()) { logger.info("Network not nated - setting up with specific ip address on a macvlan"); command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); command.create(); if (isIPv6) { if (!docker.networkNATed()) { docker.connectContainerToNetwork(containerName, "bridge"); } docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName, nodeInetAddress); } else { docker.startContainer(containerName); if (docker.networkNATed()) { setupContainerNetworkConnectivity(containerName, nodeInetAddress); } } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } } @Override @Override public Optional<Container> 
getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. */ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network * * For nat: * Setup iptables NAT rules to map the hosts public ips to the containers */ private void setupContainerNetworkConnectivity(ContainerName containerName, InetAddress externalAddress) throws IOException { if (docker.networkNATed()) { insertNAT(containerName, externalAddress); } else { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... 
command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName 
containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } @Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } /** * Only insert NAT rules if they don't exist (or else they will be added) */ private void insertNAT(ContainerName containerName, InetAddress externalAddress) throws IOException { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); String ipv6Str = docker.getGlobalIPv6Address(containerName); String checkCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(checkCommand); if (result.getFirst() == 0 ) return; String natCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str)); logger.info("Setting up NAT rules: " + natCommand); result = processExecuter.exec(checkCommand); if (result.getFirst() != 0 ) { throw new IOException("Unable to setup NAT rule - error message: " + result.getSecond()); } } }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpe_policy_updater"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) 
.collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNATed()) { logger.info("Network not nated - setting up with specific ip address on a macvlan"); command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); command.create(); if (isIPv6) { if (!docker.networkNATed()) { docker.connectContainerToNetwork(containerName, "bridge"); } docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName, nodeInetAddress); } else { docker.startContainer(containerName); if (docker.networkNATed()) { setupContainerNetworkConnectivity(containerName, nodeInetAddress); } } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } } @Override @Override public Optional<Container> 
getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. */ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network * * For nat: * Setup iptables NAT rules to map the hosts public ips to the containers */ private void setupContainerNetworkConnectivity(ContainerName containerName, InetAddress externalAddress) throws IOException { if (docker.networkNATed()) { insertNAT(containerName, externalAddress); } else { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... 
command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName 
containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } @Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } /** * Only insert NAT rules if they don't exist (or else they compounded) */ private void insertNAT(ContainerName containerName, InetAddress externalAddress) throws IOException { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); String ipv6Str = docker.getGlobalIPv6Address(containerName); String checkCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(checkCommand); if (result.getFirst() == 0 ) return; String natCommand = NATCommand.insert(externalAddress, InetAddress.getByName(ipv6Str)); logger.info("Setting up NAT rules: " + natCommand); result = processExecuter.exec(checkCommand); if (result.getFirst() != 0 ) { throw new IOException("Unable to setup NAT rule - error message: " + result.getSecond()); } } }
This should be `insert()`?
/**
 * Inserts the NAT rule mapping {@code externalAddress} to the container's global IPv6
 * address, but only if it is not already present (iptables would otherwise append a
 * duplicate rule).
 *
 * @throws IOException if the insert command fails
 */
private void insertNAT(ContainerName containerName, InetAddress externalAddress) throws IOException {
    PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
    String ipv6Str = docker.getGlobalIPv6Address(containerName);

    // Rule already present? Then nothing to do.
    String checkCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str));
    Pair<Integer, String> result = processExecuter.exec(checkCommand);
    if (result.getFirst() == 0) return;

    // BUG FIX: was NATCommand.check(...) — build the insert command.
    String natCommand = NATCommand.insert(externalAddress, InetAddress.getByName(ipv6Str));
    logger.info("Setting up NAT rules: " + natCommand);
    // BUG FIX: was exec(checkCommand) — the insert command must actually be executed.
    result = processExecuter.exec(natCommand);
    if (result.getFirst() != 0) {
        throw new IOException("Unable to setup NAT rule - error message: " + result.getSecond());
    }
}
String natCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str));
/**
 * Inserts the NAT rule mapping {@code externalAddress} to the container's global IPv6
 * address, but only if it is not already present (iptables would otherwise append a
 * duplicate rule).
 *
 * @throws IOException if the insert command fails
 */
private void insertNAT(ContainerName containerName, InetAddress externalAddress) throws IOException {
    PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
    String ipv6Str = docker.getGlobalIPv6Address(containerName);

    // Rule already present? Then nothing to do.
    String checkCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str));
    Pair<Integer, String> result = processExecuter.exec(checkCommand);
    if (result.getFirst() == 0) return;

    String natCommand = NATCommand.insert(externalAddress, InetAddress.getByName(ipv6Str));
    logger.info("Setting up NAT rules: " + natCommand);
    // BUG FIX: was exec(checkCommand) — the insert command must actually be executed.
    result = processExecuter.exec(natCommand);
    if (result.getFirst() != 0) {
        throw new IOException("Unable to setup NAT rule - error message: " + result.getSecond());
    }
}
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpe_policy_updater"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) 
.collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNATed()) { logger.info("Network not nated - setting up with specific ip address on a macvlan"); command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); command.create(); if (isIPv6) { if (!docker.networkNATed()) { docker.connectContainerToNetwork(containerName, "bridge"); } docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName, nodeInetAddress); } else { docker.startContainer(containerName); if (docker.networkNATed()) { setupContainerNetworkConnectivity(containerName, nodeInetAddress); } } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } } @Override public void removeContainer(final Container 
existingContainer, ContainerNodeSpec nodeSpec) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if (existingContainer.state.isRunning()) { logger.info("Stopping container " + containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); if (docker.networkNATed()) { logger.info("Delete iptables NAT rules for " + containerName.asString()); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); String ipv6Str = docker.getGlobalIPv6Address(containerName); String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(drop); if (result.getFirst() != 0) { logger.debug("Unable to drop NAT rule - error message: " + result.getSecond()); } } catch (IOException e) { logger.warning("Unable to drop NAT rule for container " + containerName, e); } } } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. 
*/ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network * * For nat: * Setup iptables NAT rules to map the hosts public ips to the containers */ private void setupContainerNetworkConnectivity(ContainerName containerName, InetAddress externalAddress) throws IOException { if (docker.networkNATed()) { insertNAT(containerName, externalAddress); } else { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... 
command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return 
docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } @Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } /** * Only insert NAT rules if they don't exist (or else they will be added) */ }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpe_policy_updater"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) 
.collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNATed()) { logger.info("Network not nated - setting up with specific ip address on a macvlan"); command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); command.create(); if (isIPv6) { if (!docker.networkNATed()) { docker.connectContainerToNetwork(containerName, "bridge"); } docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName, nodeInetAddress); } else { docker.startContainer(containerName); if (docker.networkNATed()) { setupContainerNetworkConnectivity(containerName, nodeInetAddress); } } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } } @Override public void removeContainer(final Container 
existingContainer, ContainerNodeSpec nodeSpec) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if (existingContainer.state.isRunning()) { logger.info("Stopping container " + containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); if (docker.networkNATed()) { logger.info("Delete iptables NAT rules for " + containerName.asString()); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); String ipv6Str = docker.getGlobalIPv6Address(containerName); String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(drop); if (result.getFirst() != 0) { logger.debug("Unable to drop NAT rule - error message: " + result.getSecond()); } } catch (IOException e) { logger.warning("Unable to drop NAT rule for container " + containerName, e); } } } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. 
*/ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network * * For nat: * Setup iptables NAT rules to map the hosts public ips to the containers */ private void setupContainerNetworkConnectivity(ContainerName containerName, InetAddress externalAddress) throws IOException { if (docker.networkNATed()) { insertNAT(containerName, externalAddress); } else { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... 
command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return 
docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } @Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } /** * Only insert NAT rules if they don't exist (or else they compounded) */ }
I made it debug because it is not always incorrect behavior, and when it is, it does not impact the routing logic. This is more of a maintenance task to avoid an ever-growing iptables rule set.
/**
 * Stops (if running) and deletes the given container. When the host uses NAT
 * networking, additionally makes a best-effort attempt to drop the container's
 * iptables NAT rule so the rule set does not grow without bound.
 *
 * @param existingContainer the container to remove
 * @param nodeSpec          node spec used to resolve the host-side address of the NAT rule
 */
public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) {
    final ContainerName containerName = existingContainer.name;
    PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
    if (existingContainer.state.isRunning()) {
        logger.info("Stopping container " + containerName.asString());
        docker.stopContainer(containerName);
    }
    logger.info("Deleting container " + containerName.asString());
    docker.deleteContainer(containerName);
    if (docker.networkNATed()) {
        logger.info("Delete iptables NAT rules for " + containerName.asString());
        try {
            InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname);
            String ipv6Str = docker.getGlobalIPv6Address(containerName);
            String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str));
            Pair<Integer, String> result = processExecuter.exec(drop);
            if (result.getFirst() != 0) {
                // Debug level on purpose: a failed drop is not always wrong (the rule may
                // simply not exist) and never affects routing — this is maintenance only.
                logger.debug("Unable to drop NAT rule - error message: " + result.getSecond());
            }
        } catch (IOException e) {
            // Best effort: container removal itself has already succeeded at this point.
            logger.warning("Unable to drop NAT rule for container " + containerName, e);
        }
    }
}
logger.debug("Unable to drop NAT rule - error message: " + result.getSecond());
/**
 * Stops the container if it is still running, deletes it, and — on NAT-networked
 * hosts — tries to clean up the container's iptables NAT rule. NAT cleanup is best
 * effort only; failures are logged and never abort the removal.
 */
public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) {
    final ContainerName containerName = existingContainer.name;
    PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);

    if (existingContainer.state.isRunning()) {
        logger.info("Stopping container " + containerName.asString());
        docker.stopContainer(containerName);
    }

    logger.info("Deleting container " + containerName.asString());
    docker.deleteContainer(containerName);

    // Nothing more to do unless this host maps public addresses via iptables NAT.
    if (!docker.networkNATed()) return;

    logger.info("Delete iptables NAT rules for " + containerName.asString());
    try {
        InetAddress hostAddress = environment.getInetAddressForHost(nodeSpec.hostname);
        String containerIpv6 = docker.getGlobalIPv6Address(containerName);
        String dropCommand = NATCommand.drop(hostAddress, InetAddress.getByName(containerIpv6));
        Pair<Integer, String> dropResult = processExecuter.exec(dropCommand);
        if (dropResult.getFirst() != 0) {
            logger.debug("Unable to drop NAT rule - error message: " + dropResult.getSecond());
        }
    } catch (IOException e) {
        logger.warning("Unable to drop NAT rule for container " + containerName, e);
    }
}
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpe_policy_updater"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) 
.collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNATed()) { logger.info("Network not nated - setting up with specific ip address on a macvlan"); command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); command.create(); if (isIPv6) { if (!docker.networkNATed()) { docker.connectContainerToNetwork(containerName, "bridge"); } docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName, nodeInetAddress); } else { docker.startContainer(containerName); if (docker.networkNATed()) { setupContainerNetworkConnectivity(containerName, nodeInetAddress); } } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } } @Override @Override public Optional<Container> 
getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. */ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network * * For nat: * Setup iptables NAT rules to map the hosts public ips to the containers */ private void setupContainerNetworkConnectivity(ContainerName containerName, InetAddress externalAddress) throws IOException { if (docker.networkNATed()) { insertNAT(containerName, externalAddress); } else { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... 
command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName 
containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } @Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } /** * Only insert NAT rules if they don't exist (or else they will be added) */ private void insertNAT(ContainerName containerName, InetAddress externalAddress) throws IOException { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); String ipv6Str = docker.getGlobalIPv6Address(containerName); String checkCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(checkCommand); if (result.getFirst() == 0 ) return; String natCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str)); logger.info("Setting up NAT rules: " + natCommand); result = processExecuter.exec(checkCommand); if (result.getFirst() != 0 ) { throw new IOException("Unable to setup NAT rule - error message: " + result.getSecond()); } } }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpe_policy_updater"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) 
.collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNATed()) { logger.info("Network not nated - setting up with specific ip address on a macvlan"); command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); command.create(); if (isIPv6) { if (!docker.networkNATed()) { docker.connectContainerToNetwork(containerName, "bridge"); } docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName, nodeInetAddress); } else { docker.startContainer(containerName); if (docker.networkNATed()) { setupContainerNetworkConnectivity(containerName, nodeInetAddress); } } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } } @Override @Override public Optional<Container> 
getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. */ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network * * For nat: * Setup iptables NAT rules to map the hosts public ips to the containers */ private void setupContainerNetworkConnectivity(ContainerName containerName, InetAddress externalAddress) throws IOException { if (docker.networkNATed()) { insertNAT(containerName, externalAddress); } else { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... 
command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName 
containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } @Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } /** * Only insert NAT rules if they don't exist (or else they compounded) */ private void insertNAT(ContainerName containerName, InetAddress externalAddress) throws IOException { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); String ipv6Str = docker.getGlobalIPv6Address(containerName); String checkCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(checkCommand); if (result.getFirst() == 0 ) return; String natCommand = NATCommand.insert(externalAddress, InetAddress.getByName(ipv6Str)); logger.info("Setting up NAT rules: " + natCommand); result = processExecuter.exec(checkCommand); if (result.getFirst() != 0 ) { throw new IOException("Unable to setup NAT rule - error message: " + result.getSecond()); } } }
fixed
private void insertNAT(ContainerName containerName, InetAddress externalAddress) throws IOException { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); String ipv6Str = docker.getGlobalIPv6Address(containerName); String checkCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(checkCommand); if (result.getFirst() == 0 ) return; String natCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str)); logger.info("Setting up NAT rules: " + natCommand); result = processExecuter.exec(checkCommand); if (result.getFirst() != 0 ) { throw new IOException("Unable to setup NAT rule - error message: " + result.getSecond()); } }
String natCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str));
private void insertNAT(ContainerName containerName, InetAddress externalAddress) throws IOException { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); String ipv6Str = docker.getGlobalIPv6Address(containerName); String checkCommand = NATCommand.check(externalAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(checkCommand); if (result.getFirst() == 0 ) return; String natCommand = NATCommand.insert(externalAddress, InetAddress.getByName(ipv6Str)); logger.info("Setting up NAT rules: " + natCommand); result = processExecuter.exec(checkCommand); if (result.getFirst() != 0 ) { throw new IOException("Unable to setup NAT rule - error message: " + result.getSecond()); } }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpe_policy_updater"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) 
.collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNATed()) { logger.info("Network not nated - setting up with specific ip address on a macvlan"); command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); command.create(); if (isIPv6) { if (!docker.networkNATed()) { docker.connectContainerToNetwork(containerName, "bridge"); } docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName, nodeInetAddress); } else { docker.startContainer(containerName); if (docker.networkNATed()) { setupContainerNetworkConnectivity(containerName, nodeInetAddress); } } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } } @Override public void removeContainer(final Container 
existingContainer, ContainerNodeSpec nodeSpec) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if (existingContainer.state.isRunning()) { logger.info("Stopping container " + containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); if (docker.networkNATed()) { logger.info("Delete iptables NAT rules for " + containerName.asString()); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); String ipv6Str = docker.getGlobalIPv6Address(containerName); String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(drop); if (result.getFirst() != 0) { logger.debug("Unable to drop NAT rule - error message: " + result.getSecond()); } } catch (IOException e) { logger.warning("Unable to drop NAT rule for container " + containerName, e); } } } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. 
*/ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network * * For nat: * Setup iptables NAT rules to map the hosts public ips to the containers */ private void setupContainerNetworkConnectivity(ContainerName containerName, InetAddress externalAddress) throws IOException { if (docker.networkNATed()) { insertNAT(containerName, externalAddress); } else { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... 
command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return 
docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } @Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } /** * Only insert NAT rules if they don't exist (or else they will be added) */ }
/**
 * Docker operations for managing Vespa node containers: creating and starting containers,
 * removing them (including tearing down NAT rules), executing commands inside a container
 * (as root, or inside the container's network namespace via nsenter), and delegating
 * image/stats queries to the underlying {@link Docker} implementation.
 *
 * NOTE(review): setupContainerNetworkConnectivity calls insertNAT(...), and the orphaned
 * javadoc at the bottom of this class appears to document it, but the method itself is not
 * visible in this part of the file — confirm it is defined elsewhere in the class.
 */
class DockerOperationsImpl implements DockerOperations {

    // Node control program inside the container; drives resume/suspend/restart/stop below.
    public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl");

    private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"};
    private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"};
    private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"};
    private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"};

    // Label identifying containers managed by node-admin (used to list/filter containers).
    private static final String MANAGER_NAME = "node-admin";

    // Host directories bind-mounted into every container. The boolean value is whether the
    // directory is made world-writable (chmod -R a+w) after the container has started.
    private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>();

    static {
        DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true);
        DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpe_policy_updater"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false);
        DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false);
    }

    private final Docker docker;
    private final Environment environment;
    private final ProcessExecuter processExecuter;

    public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) {
        this.docker = docker;
        this.environment = environment;
        this.processExecuter = processExecuter;
    }

    /**
     * Creates and starts a container for the given node spec: resolves the node's IP, builds
     * the create-container command (resource limits, ulimits, capabilities, config server
     * address, bind mounts), wires up networking (macvlan with a fixed IP when not NATed),
     * starts the container, and finally makes selected mounted directories world-writable.
     *
     * @throws RuntimeException wrapping any IOException from address resolution or network setup
     */
    @Override
    public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
        logger.info("Starting container " + containerName);
        try {
            InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname);
            final boolean isIPv6 = nodeInetAddress instanceof Inet6Address;

            String configServers = environment.getConfigServerUris().stream()
                    .map(URI::getHost)
                    .collect(Collectors.joining(","));
            Docker.CreateContainerCommand command = docker.createContainerCommand(
                    nodeSpec.wantedDockerImage.get(),
                    ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb),
                    containerName,
                    nodeSpec.hostname)
                    .withManagedBy(MANAGER_NAME)
                    .withEnvironment("CONFIG_SERVER_ADDRESS", configServers)
                    .withUlimit("nofile", 262_144, 262_144)
                    .withUlimit("nproc", 32_768, 409_600)
                    .withUlimit("core", -1, -1)
                    .withAddCapability("SYS_PTRACE")
                    .withAddCapability("SYS_ADMIN");

            if (!docker.networkNATed()) {
                // Non-NAT setup: the container gets the node's own IP on a macvlan network.
                logger.info("Network not nated - setting up with specific ip address on a macvlan");
                command.withIpAddress(nodeInetAddress);
                command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME);
                command.withVolume("/etc/hosts", "/etc/hosts");
            }

            for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) {
                String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString();
                command.withVolume(pathInHost, pathInNode);
            }

            // Pass the memory limit to Vespa inside the container, in MB.
            long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024);
            if (minMainMemoryAvailableMb > 0) {
                command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb));
            }

            logger.info("Starting new container with args: " + command);
            command.create();

            if (isIPv6) {
                if (!docker.networkNATed()) {
                    docker.connectContainerToNetwork(containerName, "bridge");
                }
                docker.startContainer(containerName);
                setupContainerNetworkConnectivity(containerName, nodeInetAddress);
            } else {
                docker.startContainer(containerName);
                if (docker.networkNATed()) {
                    setupContainerNetworkConnectivity(containerName, nodeInetAddress);
                }
            }

            // Make the directories marked 'true' above writable for all users in the container.
            DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry ->
                    docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey()));
        } catch (IOException e) {
            throw new RuntimeException("Failed to create container " + containerName.asString(), e);
        }
    }

    /**
     * Stops (if running) and deletes the given container. When NAT networking is used, also
     * drops the iptables NAT rules that mapped the host's public address to the container;
     * failures to drop the rules are logged but not rethrown.
     */
    @Override
    public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) {
        final ContainerName containerName = existingContainer.name;
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
        if (existingContainer.state.isRunning()) {
            logger.info("Stopping container " + containerName.asString());
            docker.stopContainer(containerName);
        }

        logger.info("Deleting container " + containerName.asString());
        docker.deleteContainer(containerName);

        if (docker.networkNATed()) {
            logger.info("Delete iptables NAT rules for " + containerName.asString());
            try {
                InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname);
                String ipv6Str = docker.getGlobalIPv6Address(containerName);
                String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str));
                Pair<Integer, String> result = processExecuter.exec(drop);
                if (result.getFirst() != 0) {
                    logger.debug("Unable to drop NAT rule - error message: " + result.getSecond());
                }
            } catch (IOException e) {
                logger.warning("Unable to drop NAT rule for container " + containerName, e);
            }
        }
    }

    @Override
    public Optional<Container> getContainer(ContainerName containerName) {
        return docker.getContainer(containerName);
    }

    /**
     * Try to suspend node. Suspending a node means the node should be taken offline,
     * such that maintenance can be done of the node (upgrading, rebooting, etc),
     * and such that we will start serving again as soon as possible afterwards.
     * <p>
     * Any failures are logged and ignored.
     */
    @Override
    public void trySuspendNode(ContainerName containerName) {
        try {
            executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND);
        } catch (RuntimeException e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
            // Best effort: a failed suspend should not abort the surrounding maintenance flow.
            logger.warning("Failed trying to suspend container " + containerName.asString()
                    + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e);
        }
    }

    /**
     * For macvlan:
     * Works around a docker issue where containers connected to more than one docker network
     * lose their IPv6 default gateway, by adding the host's default IPv6 gateway manually.
     * (NOTE(review): the original comment's issue URL was truncated in this file — restore it
     * from version control if possible.)
     *
     * For nat:
     * Sets up iptables NAT rules mapping the host's public IPs to the container.
     */
    private void setupContainerNetworkConnectivity(ContainerName containerName, InetAddress externalAddress) throws IOException {
        if (docker.networkNATed()) {
            insertNAT(containerName, externalAddress);
        } else {
            InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true);
            executeCommandInNetworkNamespace(containerName,
                    "route", "-A", "inet6", "add", "default",
                    "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1");
        }
    }

    @Override
    public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) {
        return docker.pullImageAsyncIfNeeded(dockerImage);
    }

    /**
     * Runs the command as root inside the container and throws if the command fails.
     *
     * @throws RuntimeException if the command exits unsuccessfully
     */
    ProcessResult executeCommandInContainer(ContainerName containerName, String... command) {
        ProcessResult result = docker.executeInContainerAsRoot(containerName, command);
        if (!result.isSuccess()) {
            throw new RuntimeException("Container " + containerName.asString()
                    + ": command " + Arrays.toString(command) + " failed: " + result);
        }
        return result;
    }

    @Override
    public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) {
        return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command);
    }

    @Override
    public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) {
        return docker.executeInContainerAsRoot(containerName, command);
    }

    /**
     * Runs the command inside the container's network namespace on the host, by looking up the
     * container's PID and wrapping the command in {@code sudo nsenter --net=/host/proc/PID/ns/net}.
     *
     * @throws RuntimeException if the container is not running, the command exits non-zero,
     *                          or an IOException occurs while executing it
     */
    @Override
    public void executeCommandInNetworkNamespace(ContainerName containerName, String... command) {
        final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
        final Integer containerPid = docker.getContainer(containerName)
                .filter(container -> container.state.isRunning())
                .map(container -> container.pid)
                .orElseThrow(() -> new RuntimeException(
                        "PID not found for container with name: " + containerName.asString()));

        final String[] wrappedCommand = Stream.concat(
                Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"),
                Stream.of(command))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(wrappedCommand);
            if (result.getFirst() != 0) {
                String msg = String.format(
                        "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s",
                        Arrays.toString(wrappedCommand), containerName.asString(), containerPid,
                        result.getFirst(), result.getSecond());
                logger.error(msg);
                throw new RuntimeException(msg);
            }
        } catch (IOException e) {
            logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)",
                    Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e);
            throw new RuntimeException(e);
        }
    }

    @Override
    public void resumeNode(ContainerName containerName) {
        executeCommandInContainer(containerName, RESUME_NODE_COMMAND);
    }

    @Override
    public void restartVespaOnNode(ContainerName containerName) {
        executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND);
    }

    @Override
    public void stopServicesOnNode(ContainerName containerName) {
        executeCommandInContainer(containerName, STOP_NODE_COMMAND);
    }

    @Override
    public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) {
        return docker.getContainerStats(containerName);
    }

    @Override
    public List<Container> getAllManagedContainers() {
        return docker.getAllContainersManagedBy(MANAGER_NAME);
    }

    @Override
    public List<ContainerName> listAllManagedContainers() {
        return docker.listAllContainersManagedBy(MANAGER_NAME);
    }

    @Override
    public void deleteUnusedDockerImages() {
        docker.deleteUnusedDockerImages();
    }

    /**
     * Only insert NAT rules if they don't already exist (otherwise duplicates would accumulate).
     * NOTE(review): this javadoc appears orphaned — presumably it documents insertNAT, which is
     * not visible in this part of the file.
     */
}
Fix typo: rename the misspelled local variable `argumnets` to `arguments`.
public String getHardwareDivergence(ContainerNodeSpec nodeSpec) { List<String> argumnets = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(nodeSpec.minDiskAvailableGb), "--memory", Double.toString(nodeSpec.minMainMemoryAvailableGb), "--cpu_cores", Double.toString(nodeSpec.minCpuCores), "--is_ssd", Boolean.toString(nodeSpec.fastDisk), "--ips", String.join(",", nodeSpec.ipAddresses))); if (nodeSpec.hardwareDivergence.isPresent()) { argumnets.add("--divergence"); argumnets.add(nodeSpec.hardwareDivergence.get()); } return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", argumnets.toArray(new String[0])); }
List<String> argumnets = new ArrayList<>(Arrays.asList("specification",
public String getHardwareDivergence(ContainerNodeSpec nodeSpec) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(nodeSpec.minDiskAvailableGb), "--memory", Double.toString(nodeSpec.minMainMemoryAvailableGb), "--cpu_cores", Double.toString(nodeSpec.minCpuCores), "--is_ssd", Boolean.toString(nodeSpec.fastDisk), "--ips", String.join(",", nodeSpec.ipAddresses))); if (nodeSpec.hardwareDivergence.isPresent()) { arguments.add("--divergence"); arguments.add(nodeSpec.hardwareDivergence.get()); } return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0])); }
/**
 * Performs storage-related maintenance for node containers: writes monitoring (yamas
 * secret-agent) and filebeat configuration into containers, measures disk usage, deletes
 * old log files, reports and archives coredumps, and cleans/archives container data.
 * Most file operations are delegated as JSON-described jobs to the external node-maintainer
 * JVM (run via maintenance.sh), executed through {@link #executeMaintainer}.
 * Per-container throttling limits how often the periodic jobs run.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    // Shared, thread-safe JSON mapper used to serialize maintenance job descriptions.
    private static final ObjectMapper objectMapper = new ObjectMapper();

    // Counts failed invocations of the external maintainer JVM, for monitoring.
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Per-container throttling state; concurrent map since agents run on multiple threads.
    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
    }

    /**
     * Writes yamas secret-agent schedules (vespa and host-life checks, tagged with node
     * owner/membership/version data) into the container's /etc/yamas-agent directory and
     * restarts the yamas-agent service in the container to pick them up.
     *
     * @throws RuntimeException wrapping any IOException from writing the schedules
     */
    public void writeMetricsConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) {
        final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");

        Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
        SecretAgentScheduleMaker vespaSchedule = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
                .withTag("parentHostname", environment.getParentHostHostname());

        Path hostLifeCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_host_life"));
        SecretAgentScheduleMaker hostLifeSchedule = new SecretAgentScheduleMaker("host-life", 60, hostLifeCheckPath)
                .withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", nodeSpec.nodeFlavor)
                .withTag("canonicalFlavor", nodeSpec.nodeCanonicalFlavor)
                .withTag("state", nodeSpec.nodeState.toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());

        nodeSpec.owner.ifPresent(owner -> hostLifeSchedule
                .withTag("tenantName", owner.tenant)
                .withTag("app", owner.application + "." + owner.instance)
                .withTag("applicationName", owner.application)
                .withTag("instanceName", owner.instance)
                .withTag("applicationId", owner.tenant + "." + owner.application + "." + owner.instance));

        nodeSpec.membership.ifPresent(membership -> hostLifeSchedule
                .withTag("clustertype", membership.clusterType)
                .withTag("clusterid", membership.clusterId));

        nodeSpec.vespaVersion.ifPresent(version -> hostLifeSchedule.withTag("vespaVersion", version));

        try {
            vespaSchedule.writeTo(yamasAgentFolder);
            hostLifeSchedule.writeTo(yamasAgentFolder);
            final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
            dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
        } catch (IOException e) {
            throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
        }
    }

    /**
     * Generates and writes the filebeat configuration into the container.
     * All failures (including missing config) are logged and swallowed — filebeat config
     * is best effort and must not take down the maintenance flow.
     */
    public void writeFilebeatConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation."
                        + nodeSpec.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + nodeSpec, t);
        }
    }

    /**
     * Returns the disk usage (in bytes) of the container's /home directory, or empty if the
     * measurement failed (the failure is logged).
     */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, "/home/");
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Measures disk usage of the given path by running {@code du -xsk} (KiB, single
     * filesystem) with a 60 second timeout, and converts the result to bytes.
     *
     * @throws RuntimeException if du times out or its output is not in the expected form
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        final String[] command = {"du", "-xsk", path.toString()};

        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }
        // du -k reports in KiB; convert to bytes.
        long diskUsageKB = Long.valueOf(results[0]);

        return diskUsageKB * 1024;
    }

    /**
     * Deletes old log files for vespa, nginx, logstash, etc.
     * Throttled per container; no-op if it ran recently.
     */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    // Queues delete-files jobs for the container's old logs (3 days), query access logs
    // (3 days), log archive (31 days) and filedistribution files (31 days).
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        String[] pathsToClean = {
                getDefaults().underVespaHome("logs/elasticsearch2"),
                getDefaults().underVespaHome("logs/logstash2"),
                getDefaults().underVespaHome("logs/daemontools_y"),
                getDefaults().underVespaHome("logs/nginx"),
                getDefaults().underVespaHome("logs/vespa")
        };

        for (String pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(
                containerName, getDefaults().underVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(
                containerName, getDefaults().underVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                containerName, getDefaults().underVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, ContainerNodeSpec nodeSpec, boolean force) {
        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, nodeSpec);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    // Queues a handle-core-dumps job, attaching node/environment metadata (hostname, zone,
    // flavor, image/version, owner) that is fed along with each reported coredump.
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, ContainerNodeSpec nodeSpec) {
        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", nodeSpec.hostname);
        attributes.put("parent_hostname", HostName.getLocalhost());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", nodeSpec.nodeFlavor);
        attributes.put("kernel_version", System.getProperty("os.version"));

        nodeSpec.currentDockerImage.ifPresent(image -> attributes.put("docker_image", image.asString()));
        nodeSpec.vespaVersion.ifPresent(version -> attributes.put("vespa_version", version));

        nodeSpec.owner.ifPresent(owner -> {
            attributes.put("tenant", owner.tenant);
            attributes.put("application", owner.application);
            attributes.put("instance", owner.instance);
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(
                        containerName, getDefaults().underVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old:
     * <ul>
     *   <li>archived app data</li>
     *   <li>Vespa logs</li>
     *   <li>Filedistribution files</li>
     * </ul>
     * Throttled via the node-admin container's own throttler.
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, getDefaults().underVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, getDefaults().underVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, ContainerNodeSpec nodeSpec) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, nodeSpec);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).reset();
    }

    // Queues jobs that delete the container's var directory and move the remaining container
    // data into the node-admin cleanup/archive location.
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(
                        containerName, getDefaults().underVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, "/"))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs the external node-maintainer JVM (via sudo + maintenance.sh) with the given main
     * class and arguments, and returns its trimmed stdout/stderr.
     * (NOTE(review): the original javadoc here described a SpecVerifier method with a
     * {@code nodeSpec} parameter that is not part of this signature; rewritten to match.)
     *
     * @throws RuntimeException if the maintainer exits non-zero (the failure counter is
     *                          incremented) or an IOException occurs while executing it
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);

            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        // Serializes all queued jobs to JSON and runs them in one maintainer invocation.
        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    // A single maintenance job: a type name plus a map of arguments, serialized to JSON by
    // Jackson (hence the @JsonProperty annotations).
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        // Paths are stored as strings so they serialize cleanly to JSON.
        MaintainerExecutorJob withArgument(String argument, Object value) {
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
        return maintenanceThrottlerByContainerName.get(containerName);
    }

    // Rate limits the periodic jobs: old-file removal at most hourly, coredump handling at
    // most every 5 minutes. EPOCH sentinels mean "run immediately".
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); } public void writeMetricsConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) { final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/"); Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa")); SecretAgentScheduleMaker vespaSchedule = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all") .withTag("parentHostname", environment.getParentHostHostname()); Path hostLifeCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_host_life")); SecretAgentScheduleMaker hostLifeSchedule = new SecretAgentScheduleMaker("host-life", 60, hostLifeCheckPath) .withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", nodeSpec.nodeFlavor) .withTag("canonicalFlavor", nodeSpec.nodeCanonicalFlavor) .withTag("state", nodeSpec.nodeState.toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", 
environment.getParentHostHostname()); nodeSpec.owner.ifPresent(owner -> hostLifeSchedule .withTag("tenantName", owner.tenant) .withTag("app", owner.application + "." + owner.instance) .withTag("applicationName", owner.application) .withTag("instanceName", owner.instance) .withTag("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)); nodeSpec.membership.ifPresent(membership -> hostLifeSchedule .withTag("clustertype", membership.clusterType) .withTag("clusterid", membership.clusterId)); nodeSpec.vespaVersion.ifPresent(version -> hostLifeSchedule.withTag("vespaVersion", version)); try { vespaSchedule.writeTo(yamasAgentFolder); hostLifeSchedule.writeTo(yamasAgentFolder); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); } catch (IOException e) { throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e); } } public void writeFilebeatConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." 
+ nodeSpec.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml"); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + nodeSpec, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, "/home/"); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! 
getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { String[] pathsToClean = { getDefaults().underVespaHome("logs/elasticsearch2"), getDefaults().underVespaHome("logs/logstash2"), getDefaults().underVespaHome("logs/daemontools_y"), getDefaults().underVespaHome("logs/nginx"), getDefaults().underVespaHome("logs/vespa") }; for (String pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", 
Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, ContainerNodeSpec nodeSpec, boolean force) { if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, nodeSpec); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, ContainerNodeSpec nodeSpec) { Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", nodeSpec.hostname); attributes.put("parent_hostname", HostName.getLocalhost()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", nodeSpec.nodeFlavor); attributes.put("kernel_version", System.getProperty("os.version")); nodeSpec.currentDockerImage.ifPresent(image -> attributes.put("docker_image", image.asString())); nodeSpec.vespaVersion.ifPresent(version -> attributes.put("vespa_version", version)); nodeSpec.owner.ifPresent(owner -> { attributes.put("tenant", owner.tenant); attributes.put("application", owner.application); attributes.put("instance", owner.instance); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ 
public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, getDefaults().underVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, getDefaults().underVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); } /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. 
* Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, ContainerNodeSpec nodeSpec) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, nodeSpec); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, "/")) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param nodeSpec Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ private String executeMaintainer(String mainClass, String... 
args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
The benefit of avoiding an update is lost since an additional node repo lookup is added. How about moving this close to fetchContainersToRunFromNodeRepository to utilize that method's lookup?
private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(dockerHostHostName) .orElseThrow(() -> new RuntimeException("Failed to get host's node spec from node-repo")); String hardwareDivergence = maintainer.getHardwareDivergence(nodeSpec); if (!nodeSpec.hardwareDivergence.orElse("null").equals(hardwareDivergence)) { NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } }
if (!nodeSpec.hardwareDivergence.orElse("null").equals(hardwareDivergence)) {
private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(dockerHostHostName) .orElseThrow(() -> new RuntimeException("Failed to get host's node spec from node-repo")); String hardwareDivergence = maintainer.getHardwareDivergence(nodeSpec); if (!nodeSpec.hardwareDivergence.orElse("null").equals(hardwareDivergence)) { NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private Optional<ClassLock> classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = Optional.of(classLocking.lockWhile(NodeAdminStateUpdater.class, () -> !terminated.get())); } catch (LockInterruptException e) { classLock = Optional.empty(); return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> 
updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("wantedState", wantedState); debug.put("currentState", currentState); debug.put("NodeAdmin", nodeAdmin.debugInfo()); } return debug; } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException | HttpException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == TRANSITIONING) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) return; synchronized (monitor) { currentState = TRANSITIONING; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); log.info("Orchestrator allows suspension of " + nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can 
not re-stop a node agent."); } classLocking.interrupt(); signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); classLock.ifPresent(lock -> { log.info(objectToString() + ": Releasing lock"); lock.close(); }); log.info(objectToString() + ": Stop complete"); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private Optional<ClassLock> classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = Optional.of(classLocking.lockWhile(NodeAdminStateUpdater.class, () -> !terminated.get())); } catch (LockInterruptException e) { classLock = Optional.empty(); return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> 
updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("wantedState", wantedState); debug.put("currentState", currentState); debug.put("NodeAdmin", nodeAdmin.debugInfo()); } return debug; } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException | HttpException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == TRANSITIONING) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) return; synchronized (monitor) { currentState = TRANSITIONING; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); log.info("Orchestrator allows suspension of " + nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can 
not re-stop a node agent."); } classLocking.interrupt(); signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); classLock.ifPresent(lock -> { log.info(objectToString() + ": Releasing lock"); lock.close(); }); log.info(objectToString() + ": Stop complete"); } }
It doesn't really matter since this is only run once and hour, the reason I have is mainly so that we avoid having `PATCH` entries in access-log when nothing has changed.
private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(dockerHostHostName) .orElseThrow(() -> new RuntimeException("Failed to get host's node spec from node-repo")); String hardwareDivergence = maintainer.getHardwareDivergence(nodeSpec); if (!nodeSpec.hardwareDivergence.orElse("null").equals(hardwareDivergence)) { NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } }
if (!nodeSpec.hardwareDivergence.orElse("null").equals(hardwareDivergence)) {
private void updateHardwareDivergence(StorageMaintainer maintainer) { if (currentState != RESUMED) return; try { ContainerNodeSpec nodeSpec = nodeRepository.getContainerNodeSpec(dockerHostHostName) .orElseThrow(() -> new RuntimeException("Failed to get host's node spec from node-repo")); String hardwareDivergence = maintainer.getHardwareDivergence(nodeSpec); if (!nodeSpec.hardwareDivergence.orElse("null").equals(hardwareDivergence)) { NodeAttributes nodeAttributes = new NodeAttributes().withHardwareDivergence(hardwareDivergence); nodeRepository.updateNodeAttributes(dockerHostHostName, nodeAttributes); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to report hardware divergence", e); } }
class NodeAdminStateUpdater { static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5); private final AtomicBoolean terminated = new AtomicBoolean(false); private State currentState = SUSPENDED_NODE_ADMIN; private State wantedState = RESUMED; private boolean workToDoNow = true; private final Object monitor = new Object(); private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName()); private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier")); private final Thread loopThread; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final NodeAdmin nodeAdmin; private final Clock clock; private final String dockerHostHostName; private final Duration nodeAdminConvergeStateInterval; private final ClassLocking classLocking; private Optional<ClassLock> classLock; private Instant lastTick; public NodeAdminStateUpdater( NodeRepository nodeRepository, Orchestrator orchestrator, StorageMaintainer storageMaintainer, NodeAdmin nodeAdmin, String dockerHostHostName, Clock clock, Duration nodeAdminConvergeStateInterval, ClassLocking classLocking) { log.info(objectToString() + ": Creating object"); this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.nodeAdmin = nodeAdmin; this.dockerHostHostName = dockerHostHostName; this.clock = clock; this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval; this.classLocking = classLocking; this.lastTick = clock.instant(); this.loopThread = new Thread(() -> { log.info(objectToString() + ": Acquiring lock"); try { classLock = Optional.of(classLocking.lockWhile(NodeAdminStateUpdater.class, () -> !terminated.get())); } catch (LockInterruptException e) { classLock = Optional.empty(); return; } log.info(objectToString() + ": Starting threads and schedulers"); nodeAdmin.start(); specVerifierScheduler.scheduleWithFixedDelay(() -> 
updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES); while (! terminated.get()) { tick(); } }); this.loopThread.setName("tick-NodeAdminStateUpdater"); } private String objectToString() { return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this)); } public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED} public Map<String, Object> getDebugPage() { Map<String, Object> debug = new LinkedHashMap<>(); synchronized (monitor) { debug.put("dockerHostHostName", dockerHostHostName); debug.put("wantedState", wantedState); debug.put("currentState", currentState); debug.put("NodeAdmin", nodeAdmin.debugInfo()); } return debug; } public boolean setResumeStateAndCheckIfResumed(State wantedState) { synchronized (monitor) { if (this.wantedState != wantedState) { log.info("Wanted state change: " + this.wantedState + " -> " + wantedState); this.wantedState = wantedState; signalWorkToBeDone(); } return currentState == wantedState; } } void signalWorkToBeDone() { synchronized (monitor) { if (! workToDoNow) { workToDoNow = true; monitor.notifyAll(); } } } void tick() { State wantedStateCopy; synchronized (monitor) { while (! 
workToDoNow) { Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant()); long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { log.info("Interrupted, but ignoring this: NodeAdminStateUpdater"); } } else break; } lastTick = clock.instant(); workToDoNow = false; wantedStateCopy = this.wantedState; } try { convergeState(wantedStateCopy); } catch (OrchestratorException | ConvergenceException | HttpException e) { log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage()); } catch (Exception e) { log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e); } if (wantedStateCopy != RESUMED && currentState == TRANSITIONING) { Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration(); if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) { log.info("Timed out trying to freeze, will force unfreezed ticks"); nodeAdmin.setFrozen(false); } } fetchContainersToRunFromNodeRepository(); } /** * This method attempts to converge node-admin w/agents to a {@link State} * with respect to: freeze, Orchestrator, and services running. */ private void convergeState(State wantedState) { if (currentState == wantedState) return; synchronized (monitor) { currentState = TRANSITIONING; } boolean wantFrozen = wantedState != RESUMED; if (!nodeAdmin.setFrozen(wantFrozen)) { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? 
"frozen" : "unfrozen")); } switch (wantedState) { case RESUMED: orchestrator.resume(dockerHostHostName); break; case SUSPENDED_NODE_ADMIN: orchestrator.suspend(dockerHostHostName); break; case SUSPENDED: List<String> nodesInActiveState = getNodesInActiveState(); List<String> nodesToSuspend = new ArrayList<>(); nodesToSuspend.addAll(nodesInActiveState); nodesToSuspend.add(dockerHostHostName); orchestrator.suspend(dockerHostHostName, nodesToSuspend); log.info("Orchestrator allows suspension of " + nodesToSuspend); nodeAdmin.stopNodeAgentServices(nodesInActiveState); break; default: throw new IllegalStateException("Unknown wanted state " + wantedState); } log.info("State changed from " + currentState + " to " + wantedState); synchronized (monitor) { currentState = wantedState; } } private void fetchContainersToRunFromNodeRepository() { synchronized (monitor) { if (currentState != RESUMED) { log.info("Frozen, skipping fetching info from node repository"); return; } final List<ContainerNodeSpec> containersToRun; try { containersToRun = nodeRepository.getContainersToRun(dockerHostHostName); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e); return; } if (containersToRun == null) { log.warning("Got null from node repository"); return; } try { nodeAdmin.refreshContainersToRun(containersToRun); } catch (Exception e) { log.log(LogLevel.WARNING, "Failed updating node admin: ", e); } } } private List<String> getNodesInActiveState() { try { return nodeRepository.getContainersToRun(dockerHostHostName) .stream() .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) .collect(Collectors.toList()); } catch (IOException e) { throw new RuntimeException("Failed to get nodes from node repo", e); } } public void start() { loopThread.start(); } public void stop() { log.info(objectToString() + ": Stop called"); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can 
not re-stop a node agent."); } classLocking.interrupt(); signalWorkToBeDone(); specVerifierScheduler.shutdown(); do { try { loopThread.join(); specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e1) { log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown"); } } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated()); nodeAdmin.stop(); classLock.ifPresent(lock -> { log.info(objectToString() + ": Releasing lock"); lock.close(); }); log.info(objectToString() + ": Stop complete"); } }
/**
 * Drives the node-admin host toward a wanted {@link State} (RESUMED / SUSPENDED_NODE_ADMIN / SUSPENDED)
 * on a dedicated tick thread, coordinating freeze/unfreeze with the {@link NodeAdmin} and
 * suspend/resume with the {@link Orchestrator}, and periodically refreshing the set of
 * containers to run from the {@link NodeRepository}.
 *
 * Thread-safety: {@code currentState}, {@code wantedState} and {@code workToDoNow} are guarded by
 * {@code monitor}; the tick loop runs on {@code loopThread}, while {@code setResumeStateAndCheckIfResumed}
 * and {@code stop} are called from other threads.
 */
class NodeAdminStateUpdater {
    // If freezing has not converged within this time, ticks are forcibly unfrozen (see tick()).
    static final Duration FREEZE_CONVERGENCE_TIMEOUT = Duration.ofMinutes(5);

    private final AtomicBoolean terminated = new AtomicBoolean(false);
    // Guarded by monitor.
    private State currentState = SUSPENDED_NODE_ADMIN;
    // Guarded by monitor.
    private State wantedState = RESUMED;
    // Guarded by monitor; set to force an immediate tick instead of waiting out the converge interval.
    private boolean workToDoNow = true;

    private final Object monitor = new Object();
    private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());
    private final ScheduledExecutorService specVerifierScheduler = Executors.newScheduledThreadPool(1,
            ThreadFactoryFactory.getDaemonThreadFactory("specverifier"));
    private final Thread loopThread;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final NodeAdmin nodeAdmin;
    private final Clock clock;
    private final String dockerHostHostName;
    private final Duration nodeAdminConvergeStateInterval;

    private final ClassLocking classLocking;
    // Set by loopThread once the class-wide lock is acquired; empty if acquisition was interrupted.
    private Optional<ClassLock> classLock;
    private Instant lastTick;

    public NodeAdminStateUpdater(
            NodeRepository nodeRepository,
            Orchestrator orchestrator,
            StorageMaintainer storageMaintainer,
            NodeAdmin nodeAdmin,
            String dockerHostHostName,
            Clock clock,
            Duration nodeAdminConvergeStateInterval,
            ClassLocking classLocking) {
        log.info(objectToString() + ": Creating object");
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.nodeAdmin = nodeAdmin;
        this.dockerHostHostName = dockerHostHostName;
        this.clock = clock;
        this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval;
        this.classLocking = classLocking;
        this.lastTick = clock.instant();

        // The tick loop only starts real work after the class-wide lock is held, ensuring a
        // single active updater. If lock acquisition is interrupted (termination), the thread exits.
        this.loopThread = new Thread(() -> {
            log.info(objectToString() + ": Acquiring lock");
            try {
                classLock = Optional.of(classLocking.lockWhile(NodeAdminStateUpdater.class, () -> !terminated.get()));
            } catch (LockInterruptException e) {
                classLock = Optional.empty();
                return;
            }

            log.info(objectToString() + ": Starting threads and schedulers");
            nodeAdmin.start();
            // updateHardwareDivergence(StorageMaintainer) is defined elsewhere in this class
            // (not shown in this excerpt); runs every hour after a 5 minute initial delay.
            specVerifierScheduler.scheduleWithFixedDelay(() ->
                    updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES);

            while (! terminated.get()) {
                tick();
            }
        });
        this.loopThread.setName("tick-NodeAdminStateUpdater");
    }

    // Identifies this instance in logs (class name + identity hash), useful when several are created.
    private String objectToString() {
        return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this));
    }

    public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED}

    /** Returns a snapshot of internal state for the debug/status page. */
    public Map<String, Object> getDebugPage() {
        Map<String, Object> debug = new LinkedHashMap<>();
        synchronized (monitor) {
            debug.put("dockerHostHostName", dockerHostHostName);
            debug.put("wantedState", wantedState);
            debug.put("currentState", currentState);
            debug.put("NodeAdmin", nodeAdmin.debugInfo());
        }
        return debug;
    }

    /**
     * Records the wanted state (waking the tick thread if it changed) and reports whether the
     * current state already matches it. Callers are expected to poll until this returns true.
     */
    public boolean setResumeStateAndCheckIfResumed(State wantedState) {
        synchronized (monitor) {
            if (this.wantedState != wantedState) {
                log.info("Wanted state change: " + this.wantedState + " -> " + wantedState);
                this.wantedState = wantedState;
                signalWorkToBeDone();
            }
            return currentState == wantedState;
        }
    }

    /** Wakes the tick thread so the next tick runs immediately instead of after the converge interval. */
    void signalWorkToBeDone() {
        synchronized (monitor) {
            if (! workToDoNow) {
                workToDoNow = true;
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the main loop: wait until the converge interval has elapsed (or work is
     * signalled), then try to converge to the wanted state and refresh containers to run.
     */
    void tick() {
        State wantedStateCopy;
        synchronized (monitor) {
            while (! workToDoNow) {
                Duration timeSinceLastConverge = Duration.between(lastTick, clock.instant());
                long remainder = nodeAdminConvergeStateInterval.minus(timeSinceLastConverge).toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        log.info("Interrupted, but ignoring this: NodeAdminStateUpdater");
                    }
                } else break;
            }
            lastTick = clock.instant();
            workToDoNow = false;
            // Copy under the lock so convergeState works on a consistent target outside the lock.
            wantedStateCopy = this.wantedState;
        }

        try {
            convergeState(wantedStateCopy);
        } catch (OrchestratorException | ConvergenceException | HttpException e) {
            // Expected transient failures while converging; retried on the next tick.
            log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage());
        } catch (Exception e) {
            log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e);
        }

        // Safety valve: if we have been stuck TRANSITIONING toward a frozen state for too long,
        // unfreeze so node agents can make progress again.
        if (wantedStateCopy != RESUMED && currentState == TRANSITIONING) {
            Duration subsystemFreezeDuration = nodeAdmin.subsystemFreezeDuration();
            if (subsystemFreezeDuration.compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
                log.info("Timed out trying to freeze, will force unfreezed ticks");
                nodeAdmin.setFrozen(false);
            }
        }

        fetchContainersToRunFromNodeRepository();
    }

    /**
     * This method attempts to converge node-admin w/agents to a {@link State}
     * with respect to: freeze, Orchestrator, and services running.
     */
    private void convergeState(State wantedState) {
        if (currentState == wantedState) return;
        synchronized (monitor) {
            currentState = TRANSITIONING;
        }

        boolean wantFrozen = wantedState != RESUMED;
        if (!nodeAdmin.setFrozen(wantFrozen)) {
            // Not converged yet; tick() will retry on the next iteration.
            throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
        }

        switch (wantedState) {
            case RESUMED:
                orchestrator.resume(dockerHostHostName);
                break;
            case SUSPENDED_NODE_ADMIN:
                orchestrator.suspend(dockerHostHostName);
                break;
            case SUSPENDED:
                // Full suspension: ask the orchestrator to suspend the host plus all active nodes,
                // then stop the node agents' services.
                List<String> nodesInActiveState = getNodesInActiveState();
                List<String> nodesToSuspend = new ArrayList<>();
                nodesToSuspend.addAll(nodesInActiveState);
                nodesToSuspend.add(dockerHostHostName);
                orchestrator.suspend(dockerHostHostName, nodesToSuspend);
                log.info("Orchestrator allows suspension of " + nodesToSuspend);
                nodeAdmin.stopNodeAgentServices(nodesInActiveState);
                break;
            default:
                throw new IllegalStateException("Unknown wanted state " + wantedState);
        }

        // NOTE(review): currentState is TRANSITIONING at this point, so the "from" in this
        // message is always TRANSITIONING rather than the state before this call.
        log.info("State changed from " + currentState + " to " + wantedState);
        synchronized (monitor) {
            currentState = wantedState;
        }
    }

    /** Fetches the containers to run from the node repository and hands them to NodeAdmin (only when RESUMED). */
    private void fetchContainersToRunFromNodeRepository() {
        synchronized (monitor) {
            if (currentState != RESUMED) {
                log.info("Frozen, skipping fetching info from node repository");
                return;
            }
            final List<ContainerNodeSpec> containersToRun;
            try {
                containersToRun = nodeRepository.getContainersToRun(dockerHostHostName);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed fetching container info from node repository", e);
                return;
            }
            if (containersToRun == null) {
                log.warning("Got null from node repository");
                return;
            }
            try {
                nodeAdmin.refreshContainersToRun(containersToRun);
            } catch (Exception e) {
                log.log(LogLevel.WARNING, "Failed updating node admin: ", e);
            }
        }
    }

    /** Returns the hostnames of this host's nodes that are in state {@code active} in the node repository. */
    private List<String> getNodesInActiveState() {
        try {
            return nodeRepository.getContainersToRun(dockerHostHostName)
                    .stream()
                    .filter(nodespec -> nodespec.nodeState == Node.State.active)
                    .map(nodespec -> nodespec.hostname)
                    .collect(Collectors.toList());
        } catch (IOException e) {
            throw new RuntimeException("Failed to get nodes from node repo", e);
        }
    }

    public void start() {
        loopThread.start();
    }

    /**
     * Stops the tick loop and the spec-verifier scheduler, then stops NodeAdmin and releases the
     * class lock. May only be called once; blocks until both threads have terminated.
     */
    public void stop() {
        log.info(objectToString() + ": Stop called");
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }

        classLocking.interrupt();
        signalWorkToBeDone();
        specVerifierScheduler.shutdown();

        // Keep joining even if interrupted: both the loop thread and the scheduler must be down
        // before NodeAdmin is stopped and the lock released.
        do {
            try {
                loopThread.join();
                specVerifierScheduler.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e1) {
                log.info("Interrupted while waiting for NodeAdminStateUpdater thread and specVerfierScheduler to shutdown");
            }
        } while (loopThread.isAlive() || !specVerifierScheduler.isTerminated());

        nodeAdmin.stop();
        classLock.ifPresent(lock -> {
            log.info(objectToString() + ": Releasing lock");
            lock.close();
        });
        log.info(objectToString() + ": Stop complete");
    }
}
Fixed typo: renamed the misspelled local variable `argumnets` to `arguments`.
/**
 * Runs the node-verification main class with this node's expected hardware specification
 * (disk, memory, CPU cores, disk type and IP addresses) and returns its output, the new
 * combined hardware divergence.
 *
 * @param nodeSpec node specification containing the expected values to verify against
 * @return new combined hardware divergence reported by the verifier
 * @throws RuntimeException if the maintainer process exits with a non-zero code
 */
public String getHardwareDivergence(ContainerNodeSpec nodeSpec) {
    // Fixed typo: local variable was misspelled "argumnets".
    List<String> arguments = new ArrayList<>(Arrays.asList("specification",
            "--disk", Double.toString(nodeSpec.minDiskAvailableGb),
            "--memory", Double.toString(nodeSpec.minMainMemoryAvailableGb),
            "--cpu_cores", Double.toString(nodeSpec.minCpuCores),
            "--is_ssd", Boolean.toString(nodeSpec.fastDisk),
            "--ips", String.join(",", nodeSpec.ipAddresses)));

    if (nodeSpec.hardwareDivergence.isPresent()) {
        // Pass the previously recorded divergence so the verifier can merge old and new findings.
        arguments.add("--divergence");
        arguments.add(nodeSpec.hardwareDivergence.get());
    }

    return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
}
List<String> argumnets = new ArrayList<>(Arrays.asList("specification",
/**
 * Runs the node-verification main class with this node's expected hardware specification
 * (disk, memory, CPU cores, disk type and IP addresses) and returns its output, the new
 * combined hardware divergence.
 *
 * @param nodeSpec node specification containing the expected values to verify against
 * @return new combined hardware divergence reported by the verifier
 * @throws RuntimeException if the maintainer process exits with a non-zero code
 */
public String getHardwareDivergence(ContainerNodeSpec nodeSpec) {
    List<String> verifierArgs = new ArrayList<>();
    verifierArgs.add("specification");
    verifierArgs.add("--disk");
    verifierArgs.add(Double.toString(nodeSpec.minDiskAvailableGb));
    verifierArgs.add("--memory");
    verifierArgs.add(Double.toString(nodeSpec.minMainMemoryAvailableGb));
    verifierArgs.add("--cpu_cores");
    verifierArgs.add(Double.toString(nodeSpec.minCpuCores));
    verifierArgs.add("--is_ssd");
    verifierArgs.add(Boolean.toString(nodeSpec.fastDisk));
    verifierArgs.add("--ips");
    verifierArgs.add(String.join(",", nodeSpec.ipAddresses));

    // Pass any previously recorded divergence so the verifier can merge old and new findings.
    nodeSpec.hardwareDivergence.ifPresent(divergence -> {
        verifierArgs.add("--divergence");
        verifierArgs.add(divergence);
    });

    return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main",
                             verifierArgs.toArray(new String[0]));
}
/**
 * Maintains the storage of this host and its containers: writes monitoring (secret-agent) and
 * filebeat configuration into containers, measures disk usage, deletes/archives old files and
 * reports coredumps. Destructive file operations are delegated to a separate maintainer JVM
 * started via maintenance.sh; per-container throttlers limit how often each job type runs.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    // Thread-safe and expensive to create; shared for serializing maintainer job lists.
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
    }

    /**
     * Writes secret-agent (yamas) check schedules for the given container and restarts the
     * yamas-agent service inside it so the new schedules take effect.
     */
    public void writeMetricsConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) {
        final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/");

        Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa"));
        SecretAgentScheduleMaker vespaSchedule = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all")
                .withTag("parentHostname", environment.getParentHostHostname());

        Path hostLifeCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_host_life"));
        SecretAgentScheduleMaker hostLifeSchedule = new SecretAgentScheduleMaker("host-life", 60, hostLifeCheckPath)
                .withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", nodeSpec.nodeFlavor)
                .withTag("canonicalFlavor", nodeSpec.nodeCanonicalFlavor)
                .withTag("state", nodeSpec.nodeState.toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());

        // Owner, membership and version tags are optional and only added when present.
        nodeSpec.owner.ifPresent(owner -> hostLifeSchedule
                .withTag("tenantName", owner.tenant)
                .withTag("app", owner.application + "." + owner.instance)
                .withTag("applicationName", owner.application)
                .withTag("instanceName", owner.instance)
                .withTag("applicationId", owner.tenant + "." + owner.application + "." + owner.instance));

        nodeSpec.membership.ifPresent(membership -> hostLifeSchedule
                .withTag("clustertype", membership.clusterType)
                .withTag("clusterid", membership.clusterId));

        nodeSpec.vespaVersion.ifPresent(version -> hostLifeSchedule.withTag("vespaVersion", version));

        try {
            vespaSchedule.writeTo(yamasAgentFolder);
            hostLifeSchedule.writeTo(yamasAgentFolder);
            final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
            dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
        } catch (IOException e) {
            throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e);
        }
    }

    /**
     * Generates and writes the filebeat configuration for the given container.
     * Best effort: failures are logged but never propagated.
     */
    public void writeFilebeatConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + nodeSpec.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml");
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + nodeSpec, t);
        }
    }

    /**
     * Returns the disk usage in bytes of the container's /home directory,
     * or empty if the measurement failed (the failure is logged).
     */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, "/home/");
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Measures disk usage of {@code path} in bytes by running {@code du -xsk} (kB, single
     * filesystem) with a 60 second timeout.
     *
     * @throws RuntimeException if du times out or produces unexpected output
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        final String[] command = {"du", "-xsk", path.toString()};

        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        // Expected output format: "<kB>\t<path>\n"
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }
        // parseLong instead of valueOf: no need to box the intermediate value.
        long diskUsageKB = Long.parseLong(results[0]);

        return diskUsageKB * 1024;
    }

    /**
     * Deletes old log files for vespa, nginx, logstash, etc.
     * Throttled to at most once per hour per container.
     */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues deletion jobs for the container's old log, query-access-log, logarchive and filedistribution files. */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        String[] pathsToClean = {
                getDefaults().underVespaHome("logs/elasticsearch2"),
                getDefaults().underVespaHome("logs/logstash2"),
                getDefaults().underVespaHome("logs/daemontools_y"),
                getDefaults().underVespaHome("logs/nginx"),
                getDefaults().underVespaHome("logs/vespa")
        };

        for (String pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                // Only rotated logs (".log" followed by something) — the live log file is kept.
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(
                containerName, getDefaults().underVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(
                containerName, getDefaults().underVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                containerName, getDefaults().underVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, ContainerNodeSpec nodeSpec, boolean force) {
        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, nodeSpec);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /** Queues a coredump-handling job, attaching host/node metadata that is fed along with each dump. */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, ContainerNodeSpec nodeSpec) {
        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", nodeSpec.hostname);
        attributes.put("parent_hostname", HostName.getLocalhost());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", nodeSpec.nodeFlavor);
        attributes.put("kernel_version", System.getProperty("os.version"));

        nodeSpec.currentDockerImage.ifPresent(image -> attributes.put("docker_image", image.asString()));
        nodeSpec.vespaVersion.ifPresent(version -> attributes.put("vespa_version", version));

        nodeSpec.owner.ifPresent(owner -> {
            attributes.put("tenant", owner.tenant);
            attributes.put("application", owner.application);
            attributes.put("instance", owner.instance);
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(
                        containerName, getDefaults().underVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, getDefaults().underVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, getDefaults().underVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, ContainerNodeSpec nodeSpec) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, nodeSpec);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        // Reset the throttler so the next container in this slot starts with a clean schedule.
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues jobs that delete the container's var directory and move the rest of its data to the cleanup area. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(
                        containerName, getDefaults().underVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, "/"))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs the given main class in the maintainer JVM (via maintenance.sh, as root) and returns
     * its trimmed stdout/stderr.
     *
     * @param mainClass fully qualified main class to run
     * @param args arguments passed on to the main class
     * @return trimmed output of the maintainer process
     * @throws RuntimeException if the process could not be started or exits with a non-zero code
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass),
                Stream.of(args))
                .toArray(String[]::new);
        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                // Fixed: preserve the cause (it was previously dropped) and correct the message.
                throw new RuntimeException("Failed to transform list of maintenance jobs to JSON", e);
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** One maintenance job, serialized to JSON as {"type": ..., "arguments": {...}} for the maintainer JVM. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Paths are serialized as strings since Path is not JSON-friendly.
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
        return maintenanceThrottlerByContainerName.get(containerName);
    }

    /** Tracks the earliest next allowed run time for each throttled maintenance job type. */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); } public void writeMetricsConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) { final Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/yamas-agent/"); Path vespaCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_vespa")); SecretAgentScheduleMaker vespaSchedule = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all") .withTag("parentHostname", environment.getParentHostHostname()); Path hostLifeCheckPath = Paths.get(getDefaults().underVespaHome("libexec/yms/yms_check_host_life")); SecretAgentScheduleMaker hostLifeSchedule = new SecretAgentScheduleMaker("host-life", 60, hostLifeCheckPath) .withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", nodeSpec.nodeFlavor) .withTag("canonicalFlavor", nodeSpec.nodeCanonicalFlavor) .withTag("state", nodeSpec.nodeState.toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", 
environment.getParentHostHostname()); nodeSpec.owner.ifPresent(owner -> hostLifeSchedule .withTag("tenantName", owner.tenant) .withTag("app", owner.application + "." + owner.instance) .withTag("applicationName", owner.application) .withTag("instanceName", owner.instance) .withTag("applicationId", owner.tenant + "." + owner.application + "." + owner.instance)); nodeSpec.membership.ifPresent(membership -> hostLifeSchedule .withTag("clustertype", membership.clusterType) .withTag("clusterid", membership.clusterId)); nodeSpec.vespaVersion.ifPresent(version -> hostLifeSchedule.withTag("vespaVersion", version)); try { vespaSchedule.writeTo(yamasAgentFolder); hostLifeSchedule.writeTo(yamasAgentFolder); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); } catch (IOException e) { throw new RuntimeException("Failed to write secret-agent schedules for " + containerName, e); } } public void writeFilebeatConfig(ContainerName containerName, ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(nodeSpec); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." 
+ nodeSpec.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, "/etc/filebeat/filebeat.yml"); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + nodeSpec, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, "/home/"); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! 
getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { String[] pathsToClean = { getDefaults().underVespaHome("logs/elasticsearch2"), getDefaults().underVespaHome("logs/logstash2"), getDefaults().underVespaHome("logs/daemontools_y"), getDefaults().underVespaHome("logs/nginx"), getDefaults().underVespaHome("logs/vespa") }; for (String pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", 
Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, ContainerNodeSpec nodeSpec, boolean force) { if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, nodeSpec); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, ContainerNodeSpec nodeSpec) { Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", nodeSpec.hostname); attributes.put("parent_hostname", HostName.getLocalhost()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", nodeSpec.nodeFlavor); attributes.put("kernel_version", System.getProperty("os.version")); nodeSpec.currentDockerImage.ifPresent(image -> attributes.put("docker_image", image.asString())); nodeSpec.vespaVersion.ifPresent(version -> attributes.put("vespa_version", version)); nodeSpec.owner.ifPresent(owner -> { attributes.put("tenant", owner.tenant); attributes.put("application", owner.application); attributes.put("instance", owner.instance); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ 
public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, getDefaults().underVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, getDefaults().underVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); } /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. 
* Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, ContainerNodeSpec nodeSpec) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, nodeSpec); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, getDefaults().underVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, "/")) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param nodeSpec Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ private String executeMaintainer(String mainClass, String... 
args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
You don't really need `flatMap` on `Optional`; you could just do ``` if (parseResultMb.isPresent()) return parseDouble(parseResultMb.get().getValue()).map(v -> v / 1000.0d); ```
protected Optional<Double> parseMemorySpeed(List<String> commandOutput) { Optional<ParseResult> parseResultGb = parseMemorySpeed(commandOutput, READ_AND_WRITE_SEARCH_WORD_GB); if (parseResultGb.isPresent()) return parseDouble(parseResultGb.get().getValue()); Optional<ParseResult> parseResultMb = parseMemorySpeed(commandOutput, READ_AND_WRITE_SEARCH_WORD_MB); if (parseResultMb.isPresent()) return parseDouble(parseResultMb.get().getValue()).flatMap(v -> Optional.of(v / 1000.0d)); return Optional.empty(); }
if (parseResultMb.isPresent()) return parseDouble(parseResultMb.get().getValue()).flatMap(v -> Optional.of(v / 1000.0d));
protected Optional<Double> parseMemorySpeed(List<String> commandOutput) { Optional<ParseResult> parseResultGb = parseMemorySpeed(commandOutput, READ_AND_WRITE_SEARCH_WORD_GB); if (parseResultGb.isPresent()) return parseDouble(parseResultGb.get().getValue()); Optional<ParseResult> parseResultMb = parseMemorySpeed(commandOutput, READ_AND_WRITE_SEARCH_WORD_MB); if (parseResultMb.isPresent()) return parseDouble(parseResultMb.get().getValue()).map(v -> v / 1000.0d); return Optional.empty(); }
class MemoryBenchmark implements Benchmark { private static final String MEM_BENCHMARK_CREATE_FOLDER = "mkdir -p RAM_test"; private static final String MEM_BENCHMARK_MOUNT_TMPFS = "mount tmpfs -t tmpfs RAM_test/"; private static final String MEM_BENCHMARK_UNMOUNT_TMPFS = "umount RAM_test"; private static final String MEM_BENCHMARK_DELETE_FOLDER = "rm -rf RAM_test"; private static final String MEM_BENCHMARK_WRITE_SPEED = "dd if=/dev/zero of=RAM_test/data_tmp bs=1M count=512"; private static final String MEM_BENCHMARK_READ_SPEED = "dd if=RAM_test/data_tmp of=/dev/null bs=1M count=512"; private static final String READ_AND_WRITE_SEARCH_WORD_GB = "GB/s"; private static final String READ_AND_WRITE_SEARCH_WORD_MB = "MB/s"; private static final String SPLIT_REGEX_STRING = " "; private static final int SEARCH_ELEMENT_INDEX = 8; private static final int RETURN_ELEMENT_INDEX = 7; private static final Logger logger = Logger.getLogger(MemoryBenchmark.class.getName()); private final BenchmarkResults benchmarkResults; private final CommandExecutor commandExecutor; public MemoryBenchmark(BenchmarkResults benchmarkResults, CommandExecutor commandExecutor) { this.benchmarkResults = benchmarkResults; this.commandExecutor = commandExecutor; } @Override public void doBenchmark() { try { setupMountPoint(); parseMemorySpeed(commandExecutor.executeCommand(MEM_BENCHMARK_WRITE_SPEED)) .ifPresent(benchmarkResults::setMemoryWriteSpeedGBs); parseMemorySpeed(commandExecutor.executeCommand(MEM_BENCHMARK_READ_SPEED)) .ifPresent(benchmarkResults::setMemoryReadSpeedGBs); } catch (IOException e) { logger.log(Level.WARNING, "Failed to perform memory benchmark", e); } finally { breakDownMountPoint(); } } private void setupMountPoint() throws IOException { commandExecutor.executeCommand(MEM_BENCHMARK_CREATE_FOLDER); commandExecutor.executeCommand(MEM_BENCHMARK_MOUNT_TMPFS); } private void breakDownMountPoint() { try { commandExecutor.executeCommand(MEM_BENCHMARK_UNMOUNT_TMPFS); } catch (IOException e) 
{ logger.log(Level.WARNING, "Failed to unmount tmpfs folder", e); } try { commandExecutor.executeCommand(MEM_BENCHMARK_DELETE_FOLDER); } catch (IOException e) { logger.log(Level.WARNING, "Failed to delete memory benchmark folder", e); } } private Optional<ParseResult> parseMemorySpeed(List<String> commandOutput, String searchWord) { List<String> searchWords = Collections.singletonList(searchWord); ParseInstructions parseInstructions = new ParseInstructions(SEARCH_ELEMENT_INDEX, RETURN_ELEMENT_INDEX, SPLIT_REGEX_STRING, searchWords); return OutputParser.parseSingleOutput(parseInstructions, commandOutput); } private Optional<Double> parseDouble(String benchmarkOutput) { try { return Optional.of(Double.parseDouble(benchmarkOutput)); } catch (NumberFormatException | NullPointerException e) { return Optional.empty(); } } }
class MemoryBenchmark implements Benchmark { private static final String MEM_BENCHMARK_CREATE_FOLDER = "mkdir -p RAM_test"; private static final String MEM_BENCHMARK_MOUNT_TMPFS = "mount tmpfs -t tmpfs RAM_test/"; private static final String MEM_BENCHMARK_UNMOUNT_TMPFS = "umount RAM_test"; private static final String MEM_BENCHMARK_DELETE_FOLDER = "rm -rf RAM_test"; private static final String MEM_BENCHMARK_WRITE_SPEED = "dd if=/dev/zero of=RAM_test/data_tmp bs=1M count=512"; private static final String MEM_BENCHMARK_READ_SPEED = "dd if=RAM_test/data_tmp of=/dev/null bs=1M count=512"; private static final String READ_AND_WRITE_SEARCH_WORD_GB = "GB/s"; private static final String READ_AND_WRITE_SEARCH_WORD_MB = "MB/s"; private static final String SPLIT_REGEX_STRING = " "; private static final int SEARCH_ELEMENT_INDEX = 8; private static final int RETURN_ELEMENT_INDEX = 7; private static final Logger logger = Logger.getLogger(MemoryBenchmark.class.getName()); private final BenchmarkResults benchmarkResults; private final CommandExecutor commandExecutor; public MemoryBenchmark(BenchmarkResults benchmarkResults, CommandExecutor commandExecutor) { this.benchmarkResults = benchmarkResults; this.commandExecutor = commandExecutor; } @Override public void doBenchmark() { try { setupMountPoint(); parseMemorySpeed(commandExecutor.executeCommand(MEM_BENCHMARK_WRITE_SPEED)) .ifPresent(benchmarkResults::setMemoryWriteSpeedGBs); parseMemorySpeed(commandExecutor.executeCommand(MEM_BENCHMARK_READ_SPEED)) .ifPresent(benchmarkResults::setMemoryReadSpeedGBs); } catch (IOException e) { logger.log(Level.WARNING, "Failed to perform memory benchmark", e); } finally { breakDownMountPoint(); } } private void setupMountPoint() throws IOException { commandExecutor.executeCommand(MEM_BENCHMARK_CREATE_FOLDER); commandExecutor.executeCommand(MEM_BENCHMARK_MOUNT_TMPFS); } private void breakDownMountPoint() { try { commandExecutor.executeCommand(MEM_BENCHMARK_UNMOUNT_TMPFS); } catch (IOException e) 
{ logger.log(Level.WARNING, "Failed to unmount tmpfs folder", e); } try { commandExecutor.executeCommand(MEM_BENCHMARK_DELETE_FOLDER); } catch (IOException e) { logger.log(Level.WARNING, "Failed to delete memory benchmark folder", e); } } private Optional<ParseResult> parseMemorySpeed(List<String> commandOutput, String searchWord) { List<String> searchWords = Collections.singletonList(searchWord); ParseInstructions parseInstructions = new ParseInstructions(SEARCH_ELEMENT_INDEX, RETURN_ELEMENT_INDEX, SPLIT_REGEX_STRING, searchWords); return OutputParser.parseSingleOutput(parseInstructions, commandOutput); } private Optional<Double> parseDouble(String benchmarkOutput) { try { return Optional.of(Double.parseDouble(benchmarkOutput)); } catch (NumberFormatException | NullPointerException e) { return Optional.empty(); } } }
I prefer braces, especially when the other branches have braces.
private String inProgressOutput(JsonNode hosts) { ArrayList<String> statusPerHost = new ArrayList<>(); for (JsonNode host : hosts) { StringBuilder sb = new StringBuilder(); String status = host.get("status").asText(); sb.append(host.get("hostname").asText()).append(": ").append(status); if (status.equals(statusUnknown)) sb.append(" (").append(host.get("message").asText()).append(")"); else if (status.equals(statusInProgress)) { JsonNode fileReferencesArray = host.get("fileReferences"); int size = fileReferencesArray.size(); int finished = 0; for (JsonNode element : fileReferencesArray) { for (Iterator<Map.Entry<String, JsonNode>> it = element.fields(); it.hasNext(); ) { Map.Entry<String, JsonNode> fileReferenceStatus = it.next(); if (fileReferenceStatus.getValue().asDouble() == 1.0) finished++; } } sb.append(" (" + finished + " of " + size + " finished)"); } statusPerHost.add(sb.toString()); } return String.join("\n", statusPerHost); }
else if (status.equals(statusInProgress)) {
private String inProgressOutput(JsonNode hosts) { ArrayList<String> statusPerHost = new ArrayList<>(); for (JsonNode host : hosts) { StringBuilder sb = new StringBuilder(); String status = host.get("status").asText(); sb.append(host.get("hostname").asText()).append(": ").append(status); if (status.equals(statusUnknown)) sb.append(" (").append(host.get("message").asText()).append(")"); else if (status.equals(statusInProgress)) { JsonNode fileReferencesArray = host.get("fileReferences"); int size = fileReferencesArray.size(); int finished = 0; for (JsonNode element : fileReferencesArray) { for (Iterator<Map.Entry<String, JsonNode>> it = element.fields(); it.hasNext(); ) { Map.Entry<String, JsonNode> fileReferenceStatus = it.next(); if (fileReferenceStatus.getValue().asDouble() == 1.0) finished++; } } sb.append(" (" + finished + " of " + size + " finished)"); } statusPerHost.add(sb.toString()); } return String.join("\n", statusPerHost); }
class FileDistributionStatusClient { private static final String statusUnknown = "UNKNOWN"; private static final String statusInProgress = "IN_PROGRESS"; private static final String statusFinished = "FINISHED"; private final String tenantName; private final String applicationName; private final String instanceName; private final String environment; private final String region; private final double timeout; private final boolean debug; FileDistributionStatusClient(CommandLineArguments arguments) { tenantName = arguments.getTenantName(); applicationName = arguments.getApplicationName(); instanceName = arguments.getInstanceName(); environment = arguments.getEnvironment(); region = arguments.getRegion(); timeout = arguments.getTimeout(); debug = arguments.getDebugFlag(); } public static void main(String[] args) { try { new FileDistributionStatusClient(CommandLineArguments.build(args)).run(); } catch (Exception e) { System.err.println(e.getMessage()); System.exit(1); } } public void run() { String json = doHttpRequest(); System.out.println(parseAndGenerateOutput(json)); } private String doHttpRequest() { int timeoutInMillis = (int) (timeout * 1000); RequestConfig config = RequestConfig.custom() .setConnectTimeout(timeoutInMillis) .setConnectionRequestTimeout(timeoutInMillis) .setSocketTimeout(timeoutInMillis) .build(); CloseableHttpClient httpClient = HttpClientBuilder.create().setDefaultRequestConfig(config).build(); URI statusUri = createStatusApiUri(); if (debug) System.out.println("URI:" + statusUri); try { CloseableHttpResponse response = httpClient.execute(new HttpGet(statusUri)); String content = EntityUtils.toString(response.getEntity()); if (debug) System.out.println("response:" + content); if (response.getStatusLine().getStatusCode() == 200) { return content; } else { throw new RuntimeException("Failed to get status for request " + statusUri + ": " + response.getStatusLine() + ": " + content); } } catch (IOException e) { throw new RuntimeException(e); } } 
String parseAndGenerateOutput(String json) { ObjectMapper objectMapper = new ObjectMapper(); JsonNode jsonNode; try { jsonNode = objectMapper.readTree(json); } catch (IOException e) { throw new RuntimeException(e); } String status = jsonNode.get("status").asText(); switch (status) { case statusUnknown: return "File distribution status unknown: " + jsonNode.get("message").asText(); case statusInProgress: return "File distribution in progress:\n" + inProgressOutput(jsonNode.get("hosts")); case statusFinished: return "File distribution finished"; default: throw new RuntimeException("Unknown status " + status); } } private URI createStatusApiUri() { String path = String.format("/application/v2/tenant/%s/application/%s/environment/%s/region/%s/instance/%s/filedistributionstatus", tenantName, applicationName, environment, region, instanceName); try { return new URIBuilder() .setScheme("http") .setHost("localhost") .setPort(19071) .setPath(path) .addParameter("timeout", String.valueOf(timeout)) .build(); } catch (URISyntaxException e) { throw new RuntimeException(e); } } @Command(name = "vespa-status-filedistribution", description = "Tool for getting file distribution status.") public static class CommandLineArguments { static CommandLineArguments build(String[] args) { CommandLineArguments arguments = null; try { arguments = SingleCommand.singleCommand(CommandLineArguments.class).parse(args); } catch (Exception e) { System.err.println(e.getMessage()); System.err.println("Use --help to show usage.\n"); System.exit(1); } if (arguments.helpOption.showHelpIfRequested()) { System.exit(0); } if (arguments.getTenantName() == null) { System.err.println("'--tenant' not set."); System.exit(1); } if (arguments.getApplicationName() == null) { System.err.println("'--application' not set."); System.exit(1); } return arguments; } @Inject HelpOption helpOption; @Option(name = {"--tenant"}, description = "tenant name") private String tenantNameArg; @Option(name = {"--application"}, 
description = "application name") private String applicationNameArg; @Option(name = {"--instance"}, description = "instance name") private String instanceNameArg = "default"; @Option(name = {"--environment"}, description = "environment name") private String environmentArg = "prod"; @Option(name = {"--region"}, description = "region name") private String regionArg = "default"; @Option(name = {"--timeout"}, description = "The timeout (in seconds).") private double timeoutArg = 5; @Option(name = {"--debug"}, description = "Print debug log.") private boolean debugArg; public String getTenantName() { return tenantNameArg; } public String getApplicationName() { return applicationNameArg; } public String getInstanceName() { return instanceNameArg; } public String getEnvironment() { return environmentArg; } public String getRegion() { return regionArg; } public double getTimeout() { return timeoutArg; } public boolean getDebugFlag() { return debugArg; } } }
class FileDistributionStatusClient { private static final String statusUnknown = "UNKNOWN"; private static final String statusInProgress = "IN_PROGRESS"; private static final String statusFinished = "FINISHED"; private final String tenantName; private final String applicationName; private final String instanceName; private final String environment; private final String region; private final double timeout; private final boolean debug; FileDistributionStatusClient(CommandLineArguments arguments) { tenantName = arguments.getTenantName(); applicationName = arguments.getApplicationName(); instanceName = arguments.getInstanceName(); environment = arguments.getEnvironment(); region = arguments.getRegion(); timeout = arguments.getTimeout(); debug = arguments.getDebugFlag(); } public static void main(String[] args) { try { new FileDistributionStatusClient(CommandLineArguments.build(args)).run(); } catch (Exception e) { System.err.println(e.getMessage()); System.exit(1); } } public void run() { String json = doHttpRequest(); System.out.println(parseAndGenerateOutput(json)); } private String doHttpRequest() { int timeoutInMillis = (int) (timeout * 1000); RequestConfig config = RequestConfig.custom() .setConnectTimeout(timeoutInMillis) .setConnectionRequestTimeout(timeoutInMillis) .setSocketTimeout(timeoutInMillis) .build(); CloseableHttpClient httpClient = HttpClientBuilder.create().setDefaultRequestConfig(config).build(); URI statusUri = createStatusApiUri(); if (debug) System.out.println("URI:" + statusUri); try { CloseableHttpResponse response = httpClient.execute(new HttpGet(statusUri)); String content = EntityUtils.toString(response.getEntity()); if (debug) System.out.println("response:" + content); if (response.getStatusLine().getStatusCode() == 200) { return content; } else { throw new RuntimeException("Failed to get status for request " + statusUri + ": " + response.getStatusLine() + ": " + content); } } catch (IOException e) { throw new RuntimeException(e); } } 
String parseAndGenerateOutput(String json) { ObjectMapper objectMapper = new ObjectMapper(); JsonNode jsonNode; try { jsonNode = objectMapper.readTree(json); } catch (IOException e) { throw new RuntimeException(e); } String status = jsonNode.get("status").asText(); switch (status) { case statusUnknown: return "File distribution status unknown: " + jsonNode.get("message").asText(); case statusInProgress: return "File distribution in progress:\n" + inProgressOutput(jsonNode.get("hosts")); case statusFinished: return "File distribution finished"; default: throw new RuntimeException("Unknown status " + status); } } private URI createStatusApiUri() { String path = String.format("/application/v2/tenant/%s/application/%s/environment/%s/region/%s/instance/%s/filedistributionstatus", tenantName, applicationName, environment, region, instanceName); try { return new URIBuilder() .setScheme("http") .setHost("localhost") .setPort(19071) .setPath(path) .addParameter("timeout", String.valueOf(timeout)) .build(); } catch (URISyntaxException e) { throw new RuntimeException(e); } } @Command(name = "vespa-status-filedistribution", description = "Tool for getting file distribution status.") public static class CommandLineArguments { static CommandLineArguments build(String[] args) { CommandLineArguments arguments = null; try { arguments = SingleCommand.singleCommand(CommandLineArguments.class).parse(args); } catch (Exception e) { System.err.println(e.getMessage()); System.err.println("Use --help to show usage.\n"); System.exit(1); } if (arguments.helpOption.showHelpIfRequested()) { System.exit(0); } if (arguments.getTenantName() == null) { System.err.println("'--tenant' not set."); System.exit(1); } if (arguments.getApplicationName() == null) { System.err.println("'--application' not set."); System.exit(1); } return arguments; } @Inject HelpOption helpOption; @Option(name = {"--tenant"}, description = "tenant name") private String tenantNameArg; @Option(name = {"--application"}, 
description = "application name") private String applicationNameArg; @Option(name = {"--instance"}, description = "instance name") private String instanceNameArg = "default"; @Option(name = {"--environment"}, description = "environment name") private String environmentArg = "prod"; @Option(name = {"--region"}, description = "region name") private String regionArg = "default"; @Option(name = {"--timeout"}, description = "The timeout (in seconds).") private double timeoutArg = 5; @Option(name = {"--debug"}, description = "Print debug log.") private boolean debugArg; public String getTenantName() { return tenantNameArg; } public String getApplicationName() { return applicationNameArg; } public String getInstanceName() { return instanceNameArg; } public String getEnvironment() { return environmentArg; } public String getRegion() { return regionArg; } public double getTimeout() { return timeoutArg; } public boolean getDebugFlag() { return debugArg; } } }
Yeah, agreed — will fix in a forthcoming PR.
private String inProgressOutput(JsonNode hosts) { ArrayList<String> statusPerHost = new ArrayList<>(); for (JsonNode host : hosts) { StringBuilder sb = new StringBuilder(); String status = host.get("status").asText(); sb.append(host.get("hostname").asText()).append(": ").append(status); if (status.equals(statusUnknown)) sb.append(" (").append(host.get("message").asText()).append(")"); else if (status.equals(statusInProgress)) { JsonNode fileReferencesArray = host.get("fileReferences"); int size = fileReferencesArray.size(); int finished = 0; for (JsonNode element : fileReferencesArray) { for (Iterator<Map.Entry<String, JsonNode>> it = element.fields(); it.hasNext(); ) { Map.Entry<String, JsonNode> fileReferenceStatus = it.next(); if (fileReferenceStatus.getValue().asDouble() == 1.0) finished++; } } sb.append(" (" + finished + " of " + size + " finished)"); } statusPerHost.add(sb.toString()); } return String.join("\n", statusPerHost); }
else if (status.equals(statusInProgress)) {
private String inProgressOutput(JsonNode hosts) { ArrayList<String> statusPerHost = new ArrayList<>(); for (JsonNode host : hosts) { StringBuilder sb = new StringBuilder(); String status = host.get("status").asText(); sb.append(host.get("hostname").asText()).append(": ").append(status); if (status.equals(statusUnknown)) sb.append(" (").append(host.get("message").asText()).append(")"); else if (status.equals(statusInProgress)) { JsonNode fileReferencesArray = host.get("fileReferences"); int size = fileReferencesArray.size(); int finished = 0; for (JsonNode element : fileReferencesArray) { for (Iterator<Map.Entry<String, JsonNode>> it = element.fields(); it.hasNext(); ) { Map.Entry<String, JsonNode> fileReferenceStatus = it.next(); if (fileReferenceStatus.getValue().asDouble() == 1.0) finished++; } } sb.append(" (" + finished + " of " + size + " finished)"); } statusPerHost.add(sb.toString()); } return String.join("\n", statusPerHost); }
class FileDistributionStatusClient { private static final String statusUnknown = "UNKNOWN"; private static final String statusInProgress = "IN_PROGRESS"; private static final String statusFinished = "FINISHED"; private final String tenantName; private final String applicationName; private final String instanceName; private final String environment; private final String region; private final double timeout; private final boolean debug; FileDistributionStatusClient(CommandLineArguments arguments) { tenantName = arguments.getTenantName(); applicationName = arguments.getApplicationName(); instanceName = arguments.getInstanceName(); environment = arguments.getEnvironment(); region = arguments.getRegion(); timeout = arguments.getTimeout(); debug = arguments.getDebugFlag(); } public static void main(String[] args) { try { new FileDistributionStatusClient(CommandLineArguments.build(args)).run(); } catch (Exception e) { System.err.println(e.getMessage()); System.exit(1); } } public void run() { String json = doHttpRequest(); System.out.println(parseAndGenerateOutput(json)); } private String doHttpRequest() { int timeoutInMillis = (int) (timeout * 1000); RequestConfig config = RequestConfig.custom() .setConnectTimeout(timeoutInMillis) .setConnectionRequestTimeout(timeoutInMillis) .setSocketTimeout(timeoutInMillis) .build(); CloseableHttpClient httpClient = HttpClientBuilder.create().setDefaultRequestConfig(config).build(); URI statusUri = createStatusApiUri(); if (debug) System.out.println("URI:" + statusUri); try { CloseableHttpResponse response = httpClient.execute(new HttpGet(statusUri)); String content = EntityUtils.toString(response.getEntity()); if (debug) System.out.println("response:" + content); if (response.getStatusLine().getStatusCode() == 200) { return content; } else { throw new RuntimeException("Failed to get status for request " + statusUri + ": " + response.getStatusLine() + ": " + content); } } catch (IOException e) { throw new RuntimeException(e); } } 
String parseAndGenerateOutput(String json) { ObjectMapper objectMapper = new ObjectMapper(); JsonNode jsonNode; try { jsonNode = objectMapper.readTree(json); } catch (IOException e) { throw new RuntimeException(e); } String status = jsonNode.get("status").asText(); switch (status) { case statusUnknown: return "File distribution status unknown: " + jsonNode.get("message").asText(); case statusInProgress: return "File distribution in progress:\n" + inProgressOutput(jsonNode.get("hosts")); case statusFinished: return "File distribution finished"; default: throw new RuntimeException("Unknown status " + status); } } private URI createStatusApiUri() { String path = String.format("/application/v2/tenant/%s/application/%s/environment/%s/region/%s/instance/%s/filedistributionstatus", tenantName, applicationName, environment, region, instanceName); try { return new URIBuilder() .setScheme("http") .setHost("localhost") .setPort(19071) .setPath(path) .addParameter("timeout", String.valueOf(timeout)) .build(); } catch (URISyntaxException e) { throw new RuntimeException(e); } } @Command(name = "vespa-status-filedistribution", description = "Tool for getting file distribution status.") public static class CommandLineArguments { static CommandLineArguments build(String[] args) { CommandLineArguments arguments = null; try { arguments = SingleCommand.singleCommand(CommandLineArguments.class).parse(args); } catch (Exception e) { System.err.println(e.getMessage()); System.err.println("Use --help to show usage.\n"); System.exit(1); } if (arguments.helpOption.showHelpIfRequested()) { System.exit(0); } if (arguments.getTenantName() == null) { System.err.println("'--tenant' not set."); System.exit(1); } if (arguments.getApplicationName() == null) { System.err.println("'--application' not set."); System.exit(1); } return arguments; } @Inject HelpOption helpOption; @Option(name = {"--tenant"}, description = "tenant name") private String tenantNameArg; @Option(name = {"--application"}, 
description = "application name") private String applicationNameArg; @Option(name = {"--instance"}, description = "instance name") private String instanceNameArg = "default"; @Option(name = {"--environment"}, description = "environment name") private String environmentArg = "prod"; @Option(name = {"--region"}, description = "region name") private String regionArg = "default"; @Option(name = {"--timeout"}, description = "The timeout (in seconds).") private double timeoutArg = 5; @Option(name = {"--debug"}, description = "Print debug log.") private boolean debugArg; public String getTenantName() { return tenantNameArg; } public String getApplicationName() { return applicationNameArg; } public String getInstanceName() { return instanceNameArg; } public String getEnvironment() { return environmentArg; } public String getRegion() { return regionArg; } public double getTimeout() { return timeoutArg; } public boolean getDebugFlag() { return debugArg; } } }
/**
 * Command line client that queries a config server over HTTP for the file
 * distribution status of an application and prints a human-readable result.
 */
class FileDistributionStatusClient {

    private static final String statusUnknown = "UNKNOWN";
    private static final String statusInProgress = "IN_PROGRESS";
    private static final String statusFinished = "FINISHED";

    private final String tenantName;
    private final String applicationName;
    private final String instanceName;
    private final String environment;
    private final String region;
    private final double timeout; // seconds
    private final boolean debug;

    FileDistributionStatusClient(CommandLineArguments arguments) {
        tenantName = arguments.getTenantName();
        applicationName = arguments.getApplicationName();
        instanceName = arguments.getInstanceName();
        environment = arguments.getEnvironment();
        region = arguments.getRegion();
        timeout = arguments.getTimeout();
        debug = arguments.getDebugFlag();
    }

    public static void main(String[] args) {
        try {
            new FileDistributionStatusClient(CommandLineArguments.build(args)).run();
        } catch (Exception e) {
            // Print only the message, not a stack trace, and signal failure to the shell.
            System.err.println(e.getMessage());
            System.exit(1);
        }
    }

    /** Performs the status request and prints a human-readable summary to stdout. */
    public void run() {
        String json = doHttpRequest();
        System.out.println(parseAndGenerateOutput(json));
    }

    /**
     * Executes the HTTP GET against the status API and returns the response body.
     *
     * @return the response body on HTTP 200
     * @throws RuntimeException on I/O failure or non-200 response
     */
    private String doHttpRequest() {
        int timeoutInMillis = (int) (timeout * 1000);
        RequestConfig config = RequestConfig.custom()
                .setConnectTimeout(timeoutInMillis)
                .setConnectionRequestTimeout(timeoutInMillis)
                .setSocketTimeout(timeoutInMillis)
                .build();
        URI statusUri = createStatusApiUri();
        if (debug)
            System.out.println("URI:" + statusUri);
        // Fixed: the client and response are Closeable and were previously never
        // closed (leaking the underlying connection); close both via try-with-resources.
        try (CloseableHttpClient httpClient = HttpClientBuilder.create().setDefaultRequestConfig(config).build();
             CloseableHttpResponse response = httpClient.execute(new HttpGet(statusUri))) {
            String content = EntityUtils.toString(response.getEntity());
            if (debug)
                System.out.println("response:" + content);
            if (response.getStatusLine().getStatusCode() == 200) {
                return content;
            } else {
                throw new RuntimeException("Failed to get status for request " + statusUri + ": " +
                                           response.getStatusLine() + ": " + content);
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
// Parses the JSON status response from the config server and renders a
// one-line (or multi-line, for in-progress) human-readable summary.
String parseAndGenerateOutput(String json) {
    ObjectMapper objectMapper = new ObjectMapper();
    JsonNode jsonNode;
    try {
        jsonNode = objectMapper.readTree(json);
    } catch (IOException e) {
        // Malformed JSON from the server is unexpected; surface as unchecked failure.
        throw new RuntimeException(e);
    }
    String status = jsonNode.get("status").asText();
    switch (status) {
        case statusUnknown:
            // Unknown status carries an explanatory "message" field from the server.
            return "File distribution status unknown: " + jsonNode.get("message").asText();
        case statusInProgress:
            // NOTE(review): inProgressOutput is defined elsewhere in this class.
            return "File distribution in progress:\n" + inProgressOutput(jsonNode.get("hosts"));
        case statusFinished:
            return "File distribution finished";
        default:
            throw new RuntimeException("Unknown status " + status);
    }
}

// Builds the config server file distribution status API URI for the configured
// tenant/application/environment/region/instance.
// NOTE(review): scheme/host/port (http://localhost:19071) are hard-coded —
// presumably the local config server; confirm before reusing elsewhere.
private URI createStatusApiUri() {
    String path = String.format("/application/v2/tenant/%s/application/%s/environment/%s/region/%s/instance/%s/filedistributionstatus",
                                tenantName, applicationName, environment, region, instanceName);
    try {
        return new URIBuilder()
                .setScheme("http")
                .setHost("localhost")
                .setPort(19071)
                .setPath(path)
                .addParameter("timeout", String.valueOf(timeout))
                .build();
    } catch (URISyntaxException e) {
        throw new RuntimeException(e);
    }
}

// Command line options, parsed with the airline library.
@Command(name = "vespa-status-filedistribution", description = "Tool for getting file distribution status.")
public static class CommandLineArguments {

    // Parses args; prints a usage error and exits the process on invalid or
    // missing mandatory options (--tenant and --application are required).
    static CommandLineArguments build(String[] args) {
        CommandLineArguments arguments = null;
        try {
            arguments = SingleCommand.singleCommand(CommandLineArguments.class).parse(args);
        } catch (Exception e) {
            System.err.println(e.getMessage());
            System.err.println("Use --help to show usage.\n");
            System.exit(1);
        }
        if (arguments.helpOption.showHelpIfRequested()) {
            System.exit(0);
        }
        if (arguments.getTenantName() == null) {
            System.err.println("'--tenant' not set.");
            System.exit(1);
        }
        if (arguments.getApplicationName() == null) {
            System.err.println("'--application' not set.");
            System.exit(1);
        }
        return arguments;
    }

    @Inject
    HelpOption helpOption;

    @Option(name = {"--tenant"}, description = "tenant name")
    private String tenantNameArg;

    @Option(name = {"--application"}, description = "application name")
    private String applicationNameArg;

    @Option(name = {"--instance"}, description = "instance name")
    private String instanceNameArg = "default";

    @Option(name = {"--environment"}, description = "environment name")
    private String environmentArg = "prod";

    @Option(name = {"--region"}, description = "region name")
    private String regionArg = "default";

    @Option(name = {"--timeout"}, description = "The timeout (in seconds).")
    private double timeoutArg = 5;

    @Option(name = {"--debug"}, description = "Print debug log.")
    private boolean debugArg;

    public String getTenantName() { return tenantNameArg; }

    public String getApplicationName() { return applicationNameArg; }

    public String getInstanceName() { return instanceNameArg; }

    public String getEnvironment() { return environmentArg; }

    public String getRegion() { return regionArg; }

    public double getTimeout() { return timeoutArg; }

    public boolean getDebugFlag() { return debugArg; }
}
}
No constant hinting at what the value 104 indicates?
/**
 * Asks the config proxy on the given host (over JRT) to start downloading the
 * given file references. Best effort: uses a short timeout and does not wait
 * for the downloads themselves.
 *
 * @param hostName       host to contact
 * @param port           JRT port of the config proxy on that host
 * @param fileReferences file references the host should download
 */
private void startDownloadingFileReferences(String hostName, int port, Set<FileReference> fileReferences) {
    // Fixed: 104 was a bare magic number; it is JRT's CONNECTION error code,
    // returned when the target cannot be reached (expected when a host is down),
    // so such errors are deliberately not logged below.
    final int errorCodeConnection = 104;
    Target target = supervisor.connect(new Spec(hostName, port));
    double timeout = 0.1; // seconds; fire-and-forget
    Request request = new Request("filedistribution.setFileReferencesToDownload");
    request.parameters().add(new StringArray(fileReferences.stream().map(FileReference::value).toArray(String[]::new)));
    log.log(LogLevel.DEBUG, "Executing " + request.methodName() + " against " + target.toString());
    target.invokeSync(request, timeout);
    if (request.isError() && request.errorCode() != errorCodeConnection) {
        log.log(LogLevel.INFO, request.methodName() + " failed: " + request.errorCode() + " (" + request.errorMessage() + ")");
    }
}
if (request.isError() && request.errorCode() != 104) {
// Asks the config proxy on the given host (over JRT, at the given port) to start
// downloading the given file references. Best effort: short timeout, does not
// wait for the downloads themselves.
private void startDownloadingFileReferences(String hostName, int port, Set<FileReference> fileReferences) {
    Target target = supervisor.connect(new Spec(hostName, port));
    double timeout = 0.1; // seconds; fire-and-forget
    Request request = new Request("filedistribution.setFileReferencesToDownload");
    request.parameters().add(new StringArray(fileReferences.stream().map(FileReference::value).toArray(String[]::new)));
    log.log(LogLevel.DEBUG, "Executing " + request.methodName() + " against " + target.toString());
    target.invokeSync(request, timeout);
    // CONNECTION errors are expected when the host is unreachable, so they are not logged.
    if (request.isError() && request.errorCode() != ErrorCode.CONNECTION) {
        log.log(LogLevel.INFO, request.methodName() + " failed: " + request.errorCode() + " (" + request.errorMessage() + ")");
    }
}
class CombinedLegacyDistribution implements FileDistribution { private final static Logger log = Logger.getLogger(CombinedLegacyDistribution.class.getName()); private final Supervisor supervisor; private final FileDistribution legacy; private final boolean disableFileDistributor; CombinedLegacyDistribution(Supervisor supervisor, FileDBHandler legacy, boolean disableFileDistributor) { this.supervisor = supervisor; this.legacy = legacy; this.disableFileDistributor = disableFileDistributor; } @Override public void sendDeployedFiles(String hostName, Set<FileReference> fileReferences) { legacy.sendDeployedFiles(hostName, fileReferences); } public void startDownload(String hostName, Set<FileReference> fileReferences) { startDownload(hostName, ConfigProxy.BASEPORT, fileReferences); } @Override public void startDownload(String hostName, int port, Set<FileReference> fileReferences) { if (disableFileDistributor) startDownloadingFileReferences(hostName, port, fileReferences); } @Override public void reloadDeployFileDistributor() { legacy.reloadDeployFileDistributor(); } @Override public void removeDeploymentsThatHaveDifferentApplicationId(Collection<String> targetHostnames) { legacy.removeDeploymentsThatHaveDifferentApplicationId(targetHostnames); } }
class CombinedLegacyDistribution implements FileDistribution { private final static Logger log = Logger.getLogger(CombinedLegacyDistribution.class.getName()); private final Supervisor supervisor; private final FileDistribution legacy; private final boolean disableFileDistributor; CombinedLegacyDistribution(Supervisor supervisor, FileDBHandler legacy, boolean disableFileDistributor) { this.supervisor = supervisor; this.legacy = legacy; this.disableFileDistributor = disableFileDistributor; } @Override public void sendDeployedFiles(String hostName, Set<FileReference> fileReferences) { legacy.sendDeployedFiles(hostName, fileReferences); } public void startDownload(String hostName, Set<FileReference> fileReferences) { startDownload(hostName, ConfigProxy.BASEPORT, fileReferences); } @Override public void startDownload(String hostName, int port, Set<FileReference> fileReferences) { if (disableFileDistributor) startDownloadingFileReferences(hostName, port, fileReferences); } @Override public void reloadDeployFileDistributor() { legacy.reloadDeployFileDistributor(); } @Override public void removeDeploymentsThatHaveDifferentApplicationId(Collection<String> targetHostnames) { legacy.removeDeploymentsThatHaveDifferentApplicationId(targetHostnames); } }
Fixed
/**
 * Asks the config proxy on the given host (over JRT) to start downloading the
 * given file references. Best effort: uses a short timeout and does not wait
 * for the downloads themselves.
 *
 * @param hostName       host to contact
 * @param port           JRT port of the config proxy on that host
 * @param fileReferences file references the host should download
 */
private void startDownloadingFileReferences(String hostName, int port, Set<FileReference> fileReferences) {
    // Fixed: 104 was a bare magic number; it is JRT's CONNECTION error code,
    // returned when the target cannot be reached (expected when a host is down),
    // so such errors are deliberately not logged below.
    final int errorCodeConnection = 104;
    Target target = supervisor.connect(new Spec(hostName, port));
    double timeout = 0.1; // seconds; fire-and-forget
    Request request = new Request("filedistribution.setFileReferencesToDownload");
    request.parameters().add(new StringArray(fileReferences.stream().map(FileReference::value).toArray(String[]::new)));
    log.log(LogLevel.DEBUG, "Executing " + request.methodName() + " against " + target.toString());
    target.invokeSync(request, timeout);
    if (request.isError() && request.errorCode() != errorCodeConnection) {
        log.log(LogLevel.INFO, request.methodName() + " failed: " + request.errorCode() + " (" + request.errorMessage() + ")");
    }
}
if (request.isError() && request.errorCode() != 104) {
// Asks the config proxy on the given host (over JRT, at the given port) to start
// downloading the given file references. Best effort: short timeout, does not
// wait for the downloads themselves.
private void startDownloadingFileReferences(String hostName, int port, Set<FileReference> fileReferences) {
    Target target = supervisor.connect(new Spec(hostName, port));
    double timeout = 0.1; // seconds; fire-and-forget
    Request request = new Request("filedistribution.setFileReferencesToDownload");
    request.parameters().add(new StringArray(fileReferences.stream().map(FileReference::value).toArray(String[]::new)));
    log.log(LogLevel.DEBUG, "Executing " + request.methodName() + " against " + target.toString());
    target.invokeSync(request, timeout);
    // CONNECTION errors are expected when the host is unreachable, so they are not logged.
    if (request.isError() && request.errorCode() != ErrorCode.CONNECTION) {
        log.log(LogLevel.INFO, request.methodName() + " failed: " + request.errorCode() + " (" + request.errorMessage() + ")");
    }
}
class CombinedLegacyDistribution implements FileDistribution { private final static Logger log = Logger.getLogger(CombinedLegacyDistribution.class.getName()); private final Supervisor supervisor; private final FileDistribution legacy; private final boolean disableFileDistributor; CombinedLegacyDistribution(Supervisor supervisor, FileDBHandler legacy, boolean disableFileDistributor) { this.supervisor = supervisor; this.legacy = legacy; this.disableFileDistributor = disableFileDistributor; } @Override public void sendDeployedFiles(String hostName, Set<FileReference> fileReferences) { legacy.sendDeployedFiles(hostName, fileReferences); } public void startDownload(String hostName, Set<FileReference> fileReferences) { startDownload(hostName, ConfigProxy.BASEPORT, fileReferences); } @Override public void startDownload(String hostName, int port, Set<FileReference> fileReferences) { if (disableFileDistributor) startDownloadingFileReferences(hostName, port, fileReferences); } @Override public void reloadDeployFileDistributor() { legacy.reloadDeployFileDistributor(); } @Override public void removeDeploymentsThatHaveDifferentApplicationId(Collection<String> targetHostnames) { legacy.removeDeploymentsThatHaveDifferentApplicationId(targetHostnames); } }
class CombinedLegacyDistribution implements FileDistribution { private final static Logger log = Logger.getLogger(CombinedLegacyDistribution.class.getName()); private final Supervisor supervisor; private final FileDistribution legacy; private final boolean disableFileDistributor; CombinedLegacyDistribution(Supervisor supervisor, FileDBHandler legacy, boolean disableFileDistributor) { this.supervisor = supervisor; this.legacy = legacy; this.disableFileDistributor = disableFileDistributor; } @Override public void sendDeployedFiles(String hostName, Set<FileReference> fileReferences) { legacy.sendDeployedFiles(hostName, fileReferences); } public void startDownload(String hostName, Set<FileReference> fileReferences) { startDownload(hostName, ConfigProxy.BASEPORT, fileReferences); } @Override public void startDownload(String hostName, int port, Set<FileReference> fileReferences) { if (disableFileDistributor) startDownloadingFileReferences(hostName, port, fileReferences); } @Override public void reloadDeployFileDistributor() { legacy.reloadDeployFileDistributor(); } @Override public void removeDeploymentsThatHaveDifferentApplicationId(Collection<String> targetHostnames) { legacy.removeDeploymentsThatHaveDifferentApplicationId(targetHostnames); } }
Would be nice if we could execute the command once to get a list of all installed packages and then just compare two `Set`s.
/**
 * Runs "yum install" unless every requested package is already installed.
 *
 * @return true if a modification was made (install was executed), false otherwise
 */
public boolean converge() {
    boolean anyMissing = packages.stream().anyMatch(pkg -> !Yum.this.isInstalled(pkg));
    if (!anyMissing) {
        return false; // everything already installed, nothing to do
    }
    execute();
    return true;
}
if (packages.stream().allMatch(Yum.this::isInstalled)) {
/**
 * Runs "yum install" unless every requested package is already installed.
 *
 * @return true if a modification was made (install was executed), false otherwise
 */
public boolean converge() {
    boolean anyMissing = packages.stream().anyMatch(pkg -> !Yum.this.isInstalled(pkg));
    if (!anyMissing) {
        return false; // everything already installed, nothing to do
    }
    execute();
    return true;
}
/**
 * Builder-style task for installing a set of packages with "yum install".
 * NOTE(review): execute() reads commandSupplier and logger from the enclosing
 * class; taskContext is stored but not used in the code shown — confirm intent.
 */
class Install {
    private final TaskContext taskContext;
    private final List<String> packages;
    // Repo to pass as --enablerepo, if any.
    private Optional<String> enabledRepo = Optional.empty();

    public Install(TaskContext taskContext, List<String> packages) {
        this.taskContext = taskContext;
        this.packages = packages;
        // At least one package is required.
        if (packages.isEmpty()) {
            throw new IllegalArgumentException("No packages specified");
        }
    }

    /** Enables the given yum repo for this install (adds --enablerepo=&lt;repo&gt;). */
    public Install enableRepo(String repo) {
        enabledRepo = Optional.of(repo);
        return this;
    }

    // Runs "yum install --assumeyes [--enablerepo=...] <packages>"; throws if yum fails.
    private void execute() {
        Command command = commandSupplier.get();
        command.add("yum", "install", "--assumeyes");
        enabledRepo.ifPresent(repo -> command.add("--enablerepo=" + repo));
        command.add(packages);
        command.spawn(logger).waitForTermination().throwIfFailed();
    }
}
/**
 * Builder-style task for installing a set of packages with "yum install".
 * NOTE(review): execute() reads commandSupplier and logger from the enclosing
 * class; taskContext is stored but not used in the code shown — confirm intent.
 */
class Install {
    private final TaskContext taskContext;
    private final List<String> packages;
    // Repo to pass as --enablerepo, if any.
    private Optional<String> enabledRepo = Optional.empty();

    public Install(TaskContext taskContext, List<String> packages) {
        this.taskContext = taskContext;
        this.packages = packages;
        // At least one package is required.
        if (packages.isEmpty()) {
            throw new IllegalArgumentException("No packages specified");
        }
    }

    /** Enables the given yum repo for this install (adds --enablerepo=&lt;repo&gt;). */
    public Install enableRepo(String repo) {
        enabledRepo = Optional.of(repo);
        return this;
    }

    // Runs "yum install --assumeyes [--enablerepo=...] <packages>"; throws if yum fails.
    private void execute() {
        Command command = commandSupplier.get();
        command.add("yum", "install", "--assumeyes");
        enabledRepo.ifPresent(repo -> command.add("--enablerepo=" + repo));
        command.add(packages);
        command.spawn(logger).waitForTermination().throwIfFailed();
    }
}
Do we want to install both if we already know one of them is installed?
/** One missing package triggers a single "yum install" covering all requested packages. */
public void testInstall() {
    TaskContext context = mock(TaskContext.class);
    TestCommandSupplier commands = new TestCommandSupplier(context);
    // package-1 present (exit 0), package-2 absent (exit 1) => converge must install.
    commands.expectCommand("yum list installed package-1", 0, "");
    commands.expectCommand("yum list installed package-2", 1, "");
    commands.expectCommand(
            "yum install --assumeyes --enablerepo=repo-name package-1 package-2", 0, "");
    Yum yum = new Yum(context, commands);
    yum.install("package-1", "package-2").enableRepo("repo-name").converge();
    commands.verifyInvocations();
}
"yum install --assumeyes --enablerepo=repo-name package-1 package-2",
/** One missing package triggers a single "yum install" covering all requested packages. */
public void testInstall() {
    TaskContext context = mock(TaskContext.class);
    TestCommandSupplier commands = new TestCommandSupplier(context);
    // package-1 present (exit 0), package-2 absent (exit 1) => converge must install.
    commands.expectCommand("yum list installed package-1", 0, "");
    commands.expectCommand("yum list installed package-2", 1, "");
    commands.expectCommand(
            "yum install --assumeyes --enablerepo=repo-name package-1 package-2", 0, "");
    Yum yum = new Yum(context, commands);
    yum.install("package-1", "package-2").enableRepo("repo-name").converge();
    commands.verifyInvocations();
}
class YumTest {

    /** No package is missing => converge() must not run "yum install". */
    @Test
    public void testAlreadyInstalled() {
        TaskContext taskContext = mock(TaskContext.class);
        TestCommandSupplier commandSupplier = new TestCommandSupplier(taskContext);
        commandSupplier.expectCommand("yum list installed package-1", 0, "");
        commandSupplier.expectCommand("yum list installed package-2", 0, "");
        Yum yum = new Yum(taskContext, commandSupplier);
        yum.install("package-1", "package-2")
                .enableRepo("repo-name")
                .converge();
        commandSupplier.verifyInvocations();
    }

    /** A failing "yum install" (non-zero exit) must surface as a CommandException. */
    // Fixed: the annotation was duplicated ("@Test @Test(expected = ...)"), which
    // does not compile — @Test is not a repeatable annotation.
    @Test(expected = CommandException.class)
    public void testFailedInstall() {
        TaskContext taskContext = mock(TaskContext.class);
        TestCommandSupplier commandSupplier = new TestCommandSupplier(taskContext);
        commandSupplier.expectCommand("yum list installed package-1", 0, "");
        commandSupplier.expectCommand("yum list installed package-2", 1, "");
        commandSupplier.expectCommand(
                "yum install --assumeyes --enablerepo=repo-name package-1 package-2",
                1,
                "error");
        Yum yum = new Yum(taskContext, commandSupplier);
        yum.install("package-1", "package-2")
                .enableRepo("repo-name")
                .converge();
        fail();
    }
}
class YumTest {

    /** No package is missing => converge() must not run "yum install". */
    @Test
    public void testAlreadyInstalled() {
        TaskContext taskContext = mock(TaskContext.class);
        TestCommandSupplier commandSupplier = new TestCommandSupplier(taskContext);
        commandSupplier.expectCommand("yum list installed package-1", 0, "");
        commandSupplier.expectCommand("yum list installed package-2", 0, "");
        Yum yum = new Yum(taskContext, commandSupplier);
        yum.install("package-1", "package-2")
                .enableRepo("repo-name")
                .converge();
        commandSupplier.verifyInvocations();
    }

    /** A failing "yum install" (non-zero exit) must surface as a CommandException. */
    // Fixed: the annotation was duplicated ("@Test @Test(expected = ...)"), which
    // does not compile — @Test is not a repeatable annotation.
    @Test(expected = CommandException.class)
    public void testFailedInstall() {
        TaskContext taskContext = mock(TaskContext.class);
        TestCommandSupplier commandSupplier = new TestCommandSupplier(taskContext);
        commandSupplier.expectCommand("yum list installed package-1", 0, "");
        commandSupplier.expectCommand("yum list installed package-2", 1, "");
        commandSupplier.expectCommand(
                "yum install --assumeyes --enablerepo=repo-name package-1 package-2",
                1,
                "error");
        Yum yum = new Yum(taskContext, commandSupplier);
        yum.install("package-1", "package-2")
                .enableRepo("repo-name")
                .converge();
        fail();
    }
}
Yes, but that would require parsing the output. Now, it's just an exit code.
/**
 * Runs "yum install" unless every requested package is already installed.
 *
 * @return true if a modification was made (install was executed), false otherwise
 */
public boolean converge() {
    boolean anyMissing = packages.stream().anyMatch(pkg -> !Yum.this.isInstalled(pkg));
    if (!anyMissing) {
        return false; // everything already installed, nothing to do
    }
    execute();
    return true;
}
if (packages.stream().allMatch(Yum.this::isInstalled)) {
/**
 * Runs "yum install" unless every requested package is already installed.
 *
 * @return true if a modification was made (install was executed), false otherwise
 */
public boolean converge() {
    boolean anyMissing = packages.stream().anyMatch(pkg -> !Yum.this.isInstalled(pkg));
    if (!anyMissing) {
        return false; // everything already installed, nothing to do
    }
    execute();
    return true;
}
/**
 * Builder-style task for installing a set of packages with "yum install".
 * NOTE(review): execute() reads commandSupplier and logger from the enclosing
 * class; taskContext is stored but not used in the code shown — confirm intent.
 */
class Install {
    private final TaskContext taskContext;
    private final List<String> packages;
    // Repo to pass as --enablerepo, if any.
    private Optional<String> enabledRepo = Optional.empty();

    public Install(TaskContext taskContext, List<String> packages) {
        this.taskContext = taskContext;
        this.packages = packages;
        // At least one package is required.
        if (packages.isEmpty()) {
            throw new IllegalArgumentException("No packages specified");
        }
    }

    /** Enables the given yum repo for this install (adds --enablerepo=&lt;repo&gt;). */
    public Install enableRepo(String repo) {
        enabledRepo = Optional.of(repo);
        return this;
    }

    // Runs "yum install --assumeyes [--enablerepo=...] <packages>"; throws if yum fails.
    private void execute() {
        Command command = commandSupplier.get();
        command.add("yum", "install", "--assumeyes");
        enabledRepo.ifPresent(repo -> command.add("--enablerepo=" + repo));
        command.add(packages);
        command.spawn(logger).waitForTermination().throwIfFailed();
    }
}
/**
 * Builder-style task for installing a set of packages with "yum install".
 * NOTE(review): execute() reads commandSupplier and logger from the enclosing
 * class; taskContext is stored but not used in the code shown — confirm intent.
 */
class Install {
    private final TaskContext taskContext;
    private final List<String> packages;
    // Repo to pass as --enablerepo, if any.
    private Optional<String> enabledRepo = Optional.empty();

    public Install(TaskContext taskContext, List<String> packages) {
        this.taskContext = taskContext;
        this.packages = packages;
        // At least one package is required.
        if (packages.isEmpty()) {
            throw new IllegalArgumentException("No packages specified");
        }
    }

    /** Enables the given yum repo for this install (adds --enablerepo=&lt;repo&gt;). */
    public Install enableRepo(String repo) {
        enabledRepo = Optional.of(repo);
        return this;
    }

    // Runs "yum install --assumeyes [--enablerepo=...] <packages>"; throws if yum fails.
    private void execute() {
        Command command = commandSupplier.get();
        command.add("yum", "install", "--assumeyes");
        enabledRepo.ifPresent(repo -> command.add("--enablerepo=" + repo));
        command.add(packages);
        command.spawn(logger).waitForTermination().throwIfFailed();
    }
}
Could we risk upgrading the package that was left out?
/** One missing package triggers a single "yum install" covering all requested packages. */
public void testInstall() {
    TaskContext context = mock(TaskContext.class);
    TestCommandSupplier commands = new TestCommandSupplier(context);
    // package-1 present (exit 0), package-2 absent (exit 1) => converge must install.
    commands.expectCommand("yum list installed package-1", 0, "");
    commands.expectCommand("yum list installed package-2", 1, "");
    commands.expectCommand(
            "yum install --assumeyes --enablerepo=repo-name package-1 package-2", 0, "");
    Yum yum = new Yum(context, commands);
    yum.install("package-1", "package-2").enableRepo("repo-name").converge();
    commands.verifyInvocations();
}
"yum install --assumeyes --enablerepo=repo-name package-1 package-2",
/** One missing package triggers a single "yum install" covering all requested packages. */
public void testInstall() {
    TaskContext context = mock(TaskContext.class);
    TestCommandSupplier commands = new TestCommandSupplier(context);
    // package-1 present (exit 0), package-2 absent (exit 1) => converge must install.
    commands.expectCommand("yum list installed package-1", 0, "");
    commands.expectCommand("yum list installed package-2", 1, "");
    commands.expectCommand(
            "yum install --assumeyes --enablerepo=repo-name package-1 package-2", 0, "");
    Yum yum = new Yum(context, commands);
    yum.install("package-1", "package-2").enableRepo("repo-name").converge();
    commands.verifyInvocations();
}
class YumTest {

    /** No package is missing => converge() must not run "yum install". */
    @Test
    public void testAlreadyInstalled() {
        TaskContext taskContext = mock(TaskContext.class);
        TestCommandSupplier commandSupplier = new TestCommandSupplier(taskContext);
        commandSupplier.expectCommand("yum list installed package-1", 0, "");
        commandSupplier.expectCommand("yum list installed package-2", 0, "");
        Yum yum = new Yum(taskContext, commandSupplier);
        yum.install("package-1", "package-2")
                .enableRepo("repo-name")
                .converge();
        commandSupplier.verifyInvocations();
    }

    /** A failing "yum install" (non-zero exit) must surface as a CommandException. */
    // Fixed: the annotation was duplicated ("@Test @Test(expected = ...)"), which
    // does not compile — @Test is not a repeatable annotation.
    @Test(expected = CommandException.class)
    public void testFailedInstall() {
        TaskContext taskContext = mock(TaskContext.class);
        TestCommandSupplier commandSupplier = new TestCommandSupplier(taskContext);
        commandSupplier.expectCommand("yum list installed package-1", 0, "");
        commandSupplier.expectCommand("yum list installed package-2", 1, "");
        commandSupplier.expectCommand(
                "yum install --assumeyes --enablerepo=repo-name package-1 package-2",
                1,
                "error");
        Yum yum = new Yum(taskContext, commandSupplier);
        yum.install("package-1", "package-2")
                .enableRepo("repo-name")
                .converge();
        fail();
    }
}
class YumTest {

    /** No package is missing => converge() must not run "yum install". */
    @Test
    public void testAlreadyInstalled() {
        TaskContext taskContext = mock(TaskContext.class);
        TestCommandSupplier commandSupplier = new TestCommandSupplier(taskContext);
        commandSupplier.expectCommand("yum list installed package-1", 0, "");
        commandSupplier.expectCommand("yum list installed package-2", 0, "");
        Yum yum = new Yum(taskContext, commandSupplier);
        yum.install("package-1", "package-2")
                .enableRepo("repo-name")
                .converge();
        commandSupplier.verifyInvocations();
    }

    /** A failing "yum install" (non-zero exit) must surface as a CommandException. */
    // Fixed: the annotation was duplicated ("@Test @Test(expected = ...)"), which
    // does not compile — @Test is not a repeatable annotation.
    @Test(expected = CommandException.class)
    public void testFailedInstall() {
        TaskContext taskContext = mock(TaskContext.class);
        TestCommandSupplier commandSupplier = new TestCommandSupplier(taskContext);
        commandSupplier.expectCommand("yum list installed package-1", 0, "");
        commandSupplier.expectCommand("yum list installed package-2", 1, "");
        commandSupplier.expectCommand(
                "yum install --assumeyes --enablerepo=repo-name package-1 package-2",
                1,
                "error");
        Yum yum = new Yum(taskContext, commandSupplier);
        yum.install("package-1", "package-2")
                .enableRepo("repo-name")
                .converge();
        fail();
    }
}
There should probably be a warning for this?
// Single convergence pass: fetches the node spec from the node repository and
// drives the local container towards the state the spec prescribes.
void converge() {
    final Optional<ContainerNodeSpec> nodeSpecOptional = nodeRepository.getContainerNodeSpec(hostname);

    // After the dirty -> ready transition below, the node is expected to be
    // absent from the node repo until reallocated; tolerate that silently.
    if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return;

    final ContainerNodeSpec nodeSpec = nodeSpecOptional.orElseThrow(() ->
            new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
    expectNodeNotInNodeRepo = false;

    Optional<Container> container = getContainer();
    // On spec change, remember the new spec and refresh metrics config if a container exists.
    if (!nodeSpec.equals(lastNodeSpec)) {
        addDebugMessage("Loading new node spec: " + nodeSpec.toString());
        lastNodeSpec = nodeSpec;
        if (container.isPresent()) {
            storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        }
    }

    switch (nodeSpec.nodeState) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should run in these states.
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
            // Remove old files when disk utilization reaches 80% of minDiskAvailableGb.
            storageMaintainer.getDiskUsageFor(containerName)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb)
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName));
            scheduleDownLoadIfNeeded(nodeSpec);
            // Cannot (re)start the container until its image is fully downloaded.
            if (isDownloadingImage()) {
                addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            if (! container.isPresent()) {
                storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                startContainer(nodeSpec);
            }
            runLocalResumeScriptIfNeeded();
            // Update attributes in the node repo before calling resume.
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            logger.info("Call resume against Orchestrator");
            orchestrator.resume(hostname);
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case provisioned:
            nodeRepository.markAsDirty(hostname);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
            storageMaintainer.cleanupNodeStorage(containerName, nodeSpec);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            nodeRepository.markNodeAvailableForNewAllocation(hostname);
            // The node may now disappear from the node repo; see the early return above.
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
    }
}
if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return;
// Single convergence pass: fetches the node spec from the node repository and
// drives the local container towards the state the spec prescribes.
void converge() {
    final Optional<ContainerNodeSpec> nodeSpecOptional = nodeRepository.getContainerNodeSpec(hostname);

    // After the dirty -> ready transition below, the node is expected to be
    // absent from the node repo until reallocated; tolerate that silently.
    if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return;

    final ContainerNodeSpec nodeSpec = nodeSpecOptional.orElseThrow(() ->
            new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
    expectNodeNotInNodeRepo = false;

    Optional<Container> container = getContainer();
    // On spec change, remember the new spec and refresh metrics config if a container exists.
    if (!nodeSpec.equals(lastNodeSpec)) {
        addDebugMessage("Loading new node spec: " + nodeSpec.toString());
        lastNodeSpec = nodeSpec;
        if (container.isPresent()) {
            storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        }
    }

    switch (nodeSpec.nodeState) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should run in these states.
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
            // Remove old files when disk utilization reaches 80% of minDiskAvailableGb.
            storageMaintainer.getDiskUsageFor(containerName)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb)
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName));
            scheduleDownLoadIfNeeded(nodeSpec);
            // Cannot (re)start the container until its image is fully downloaded.
            if (isDownloadingImage()) {
                addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            if (! container.isPresent()) {
                storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                startContainer(nodeSpec);
            }
            runLocalResumeScriptIfNeeded();
            // Update attributes in the node repo before calling resume.
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            logger.info("Call resume against Orchestrator");
            orchestrator.resume(hostname);
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case provisioned:
            nodeRepository.markAsDirty(hostname);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
            storageMaintainer.cleanupNodeStorage(containerName, nodeSpec);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            nodeRepository.markNodeAvailableForNewAllocation(hostname);
            // The node may now disappear from the node repo; see the early return above.
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
    }
}
/**
 * Agent responsible for one node: runs a background tick loop that periodically converges
 * the node's Docker container towards the wanted state from the node repository, and
 * publishes container metrics. Frozen/unfrozen state pauses convergence.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Agent starts frozen; tick() copies wantFrozen into isFrozen under the monitor.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;
    // Set after moving a node to ready; converge() then tolerates the node being absent
    // from the node repository.
    private boolean expectNodeNotInNodeRepo = false;

    // Guards the tick-loop state above (wait/notify coordination).
    private final Object monitor = new Object();

    private final PrefixLogger logger;
    // Non-null while an image pull is in flight; cleared when the pull completes.
    private DockerImage imageBeingDownloaded = null;

    private final ContainerName containerName;
    private final String hostname;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Runnable aclMaintainer;
    private final Environment environment;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;

    // NOTE(review): SimpleDateFormat is not thread-safe; sdf is only used inside
    // addDebugMessage(), which synchronizes on debugMessages, so usage is serialized.
    private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    // Ring buffer of recent debug messages (capped at ~1000), guarded by its own lock.
    private final LinkedList<String> debugMessages = new LinkedList<>();
    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Future<?> currentFilebeatRestarter;

    // True once the node-program resume command has run for the current container.
    private boolean resumeScriptRun = false;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        UNKNOWN
    }
    private ContainerState containerState = UNKNOWN;

    private NodeAttributes lastAttributesSet = null;
    private ContainerNodeSpec lastNodeSpec = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final String hostName,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Runnable aclMaintainer,
            final Environment environment,
            final Clock clock,
            final Duration timeBetweenEachConverge) {
        this.containerName = ContainerName.fromHostname(hostName);
        this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
        this.hostname = hostName;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.environment = environment;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        // Thread is created here but only started by start().
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) tick();
        });
        this.loopThread.setName("tick-" + hostname);
    }

    /**
     * Requests the agent to freeze/unfreeze and wakes the tick loop.
     *
     * @return true if the agent has already reached the requested frozen state
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    /** Logs and records a timestamped message in the capped debug-message history. */
    private void addDebugMessage(String message) {
        synchronized (debugMessages) {
            while (debugMessages.size() > 1000) {
                debugMessages.pop();
            }
            logger.debug(message);
            debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
        }
    }

    /** Returns a snapshot of the agent's internal state for diagnostics. */
    @Override
    public Map<String, Object> debugInfo() {
        Map<String, Object> debug = new LinkedHashMap<>();
        debug.put("Hostname", hostname);
        debug.put("isFrozen", isFrozen);
        debug.put("wantFrozen", wantFrozen);
        debug.put("terminated", terminated);
        debug.put("workToDoNow", workToDoNow);
        synchronized (debugMessages) {
            debug.put("History", new LinkedList<>(debugMessages));
        }
        // NOTE(review): lastNodeSpec may still be null before the first converge —
        // this would NPE; confirm debugInfo() is never called that early.
        debug.put("Node repo state", lastNodeSpec.nodeState.name());
        return debug;
    }

    /** Starts the tick loop and sets up the in-container service restarter helper. */
    @Override
    public void start() {
        String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms";
        logger.info(message);
        addDebugMessage(message);
        loopThread.start();
        // Restarts a named service inside the container ("service <name> restart" as root).
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        containerName, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    logger.error("Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                logger.error("Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Terminates the agent: stops the scheduler, marks terminated, and blocks until both
     * the tick thread and the filebeat-restarter scheduler have fully shut down.
     *
     * @throws RuntimeException if called more than once
     */
    @Override
    public void stop() {
        addDebugMessage("Stopping");
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        // Loop to tolerate spurious InterruptedException while joining.
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        logger.info("Stopped");
    }

    /** Runs the optional node resume program once per container start. */
    private void runLocalResumeScriptIfNeeded() {
        if (!resumeScriptRun) {
            addDebugMessage("Starting optional node program resume command");
            dockerOperations.resumeNode(containerName);
            resumeScriptRun = true;
        }
    }

    /**
     * Builds the node attributes to report for the current state and publishes them if
     * changed. Image/version are reported as empty when the container is known absent.
     */
    private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
        final NodeAttributes nodeAttributes = new NodeAttributes()
                .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
                .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
                .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
                .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
        publishStateToNodeRepoIfChanged(nodeAttributes);
    }

    /** Pushes attributes to the node repository only when they differ from the last publish. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
        if (!currentAttributes.equals(lastAttributesSet)) {
            logger.info("Publishing new set of attributes to node repo: "
                    + lastAttributesSet + " -> " + currentAttributes);
            addDebugMessage("Publishing new set of attributes to node repo: {"
                    + lastAttributesSet + "} -> {" + currentAttributes + "}");
            nodeRepository.updateNodeAttributes(hostname, currentAttributes);
            lastAttributesSet = currentAttributes;
        }
    }

    /**
     * Starts the container and resets per-container state: fresh CPU metrics baseline,
     * daily filebeat restart schedule, metrics/filebeat config, resume-script flag.
     */
    private void startContainer(ContainerNodeSpec nodeSpec) {
        aclMaintainer.run();
        dockerOperations.startContainer(containerName, nodeSpec);
        lastCpuMetric = new CpuUsageReporter();

        currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(
                () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
        storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        storageMaintainer.writeFilebeatConfig(containerName, nodeSpec);

        resumeScriptRun = false;
        containerState = UNKNOWN;
        logger.info("Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if required; otherwise restarts its services if the restart
     * generation was bumped. Returns the (possibly removed, hence empty) container.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
                .map(container -> {
                    shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                        logger.info("Will restart services for container " + container + ": " + restartReason);
                        restartServices(nodeSpec, container);
                    });
                    return container;
                });
    }

    /** Returns the reason services must be restarted (wanted restart generation bumped), if any. */
    private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
        if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
        if (!nodeSpec.currentRestartGeneration.isPresent() ||
                nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
        }
        return Optional.empty();
    }

    /** Restarts Vespa in a running, active container, after Orchestrator approves suspension. */
    private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
        if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
            ContainerName containerName = existingContainer.name;
            logger.info("Restarting services for " + containerName);
            // Since we are restarting the services we need to suspend the node.
            orchestratorSuspendNode();
            dockerOperations.restartVespaOnNode(containerName);
        }
    }

    /** Suspends (best effort) and stops all services running in the container. */
    @Override
    public void stopServices() {
        logger.info("Stopping services for " + containerName);
        dockerOperations.trySuspendNode(containerName);
        dockerOperations.stopServicesOnNode(containerName);
    }

    /**
     * Returns the reason the existing container should be removed, if any: node state says
     * so, wanted image changed, container not running, or resource allocation changed.
     */
    private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
        final Node.State nodeState = nodeSpec.nodeState;
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }
        ContainerResources wantedContainerResources = ContainerResources.from(
                nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb);
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        return Optional.empty();
    }

    /**
     * Removes the container when warranted (suspending the node first if active and
     * stopping services best-effort), cancels the filebeat restarter, and marks the
     * container ABSENT. Returns empty if removed, otherwise the container unchanged.
     */
    private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
        if (removeReason.isPresent()) {
            logger.info("Will remove container " + existingContainer + ": " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (nodeSpec.nodeState == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    stopServices();
                } catch (Exception e) {
                    // Best effort — removal proceeds even if the services fail to stop.
                    logger.info("Failed stopping services, ignoring", e);
                }
            }
            if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
            dockerOperations.removeContainer(existingContainer, nodeSpec);
            containerState = ABSENT;
            logger.info("Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Kicks off an async pull of the wanted image when it differs from the current one. */
    private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
        if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;

        if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) {
            imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        } else if (imageBeingDownloaded != null) {
            // Pull no longer needed — download completed.
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the tick loop immediately instead of waiting for the next interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                addDebugMessage("Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One tick-loop iteration: waits out the remainder of the converge interval (or until
     * signalled), syncs the frozen flag, then runs converge() unless frozen. Exception
     * policy: Orchestrator denials are logged at info; DockerException resets
     * containerState to UNKNOWN; other exceptions are counted and swallowed; any
     * Throwable takes the process down.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        logger.error("Interrupted, but ignoring this: " + hostname);
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        if (isFrozenCopy) {
            addDebugMessage("tick: isFrozen");
        } else {
            try {
                converge();
            } catch (OrchestratorException e) {
                logger.info(e.getMessage());
                addDebugMessage(e.getMessage());
            } catch (DockerException e) {
                numberOfUnhandledException++;
                containerState = UNKNOWN;
                logger.error("Caught a DockerException, resetting containerState to " + containerState, e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                logger.error("Unhandled exception, ignoring.", e);
                addDebugMessage(e.getMessage());
            } catch (Throwable t) {
                logger.error("Unhandled throwable, taking down system.", t);
                System.exit(234);
            }
        }
    }

    /**
     * Collects CPU/memory/disk/network metrics from the Docker stats API for the current
     * container and pushes them into the container's metrics agent. No-op if no spec has
     * been loaded yet or the container is known absent.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final ContainerNodeSpec nodeSpec = lastNodeSpec;
        if (nodeSpec == null || containerState == ABSENT) return;

        Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", hostname)
                .add("role", "tenants")
                .add("state", nodeSpec.nodeState.toString())
                .add("parentHostname", environment.getParentHostHostname());
        Dimensions dimensions = dimensionsBuilder.build();

        Docker.ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // Raw counters out of the Docker stats JSON (cumulative since container start).
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName);

        // Convert cumulative counters to deltas since the previous call, then scale CPU
        // utilization to the share of cores allocated to this node.
        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
        final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        // Memory "used" excludes the page cache.
        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        // One metric set per network interface.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    /** Serializes the metrics and delivers them to the in-container metrics RPC endpoint. */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            // Invoke setExtraMetrics on the metrics proxy inside the container, 5s timeout.
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091",
                    "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            logger.warning("Unable to push metrics to container: " + containerName, e);
        }
    }

    /**
     * Asks the docker daemon for the container unless it is known ABSENT; caches the
     * ABSENT observation so we avoid repeated daemon round-trips.
     */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(containerName);
        if (!container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public String getHostname() {
        return hostname;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count since the previous call, and resets it. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks deltas of cumulative CPU counters between successive metric collections. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        // First call yields deltaSystemUsage == 0, making the ratios below NaN (warm-up).
        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Same as {@link #getCpuUsageRatio()} but for kernel-mode CPU time only. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the Orchestrator for permission to suspend this node (throws if denied). */
    private void orchestratorSuspendNode() {
        logger.info("Ask Orchestrator for permission to suspend node " + hostname);
        orchestrator.suspend(hostname);
    }
}
/**
 * Agent responsible for one node: runs a background tick loop that periodically converges
 * the node's Docker container towards the wanted state from the node repository, and
 * publishes container metrics. Frozen/unfrozen state pauses convergence.
 *
 * NOTE(review): this class definition appears verbatim twice in this chunk — likely an
 * extraction/duplication artifact; confirm against the original file and keep only one copy.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Agent starts frozen; tick() copies wantFrozen into isFrozen under the monitor.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;
    // Set after moving a node to ready; converge() then tolerates the node being absent
    // from the node repository.
    private boolean expectNodeNotInNodeRepo = false;

    // Guards the tick-loop state above (wait/notify coordination).
    private final Object monitor = new Object();

    private final PrefixLogger logger;
    // Non-null while an image pull is in flight; cleared when the pull completes.
    private DockerImage imageBeingDownloaded = null;

    private final ContainerName containerName;
    private final String hostname;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Runnable aclMaintainer;
    private final Environment environment;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;

    // NOTE(review): SimpleDateFormat is not thread-safe; sdf is only used inside
    // addDebugMessage(), which synchronizes on debugMessages, so usage is serialized.
    private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    // Ring buffer of recent debug messages (capped at ~1000), guarded by its own lock.
    private final LinkedList<String> debugMessages = new LinkedList<>();
    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Future<?> currentFilebeatRestarter;

    // True once the node-program resume command has run for the current container.
    private boolean resumeScriptRun = false;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        UNKNOWN
    }
    private ContainerState containerState = UNKNOWN;

    private NodeAttributes lastAttributesSet = null;
    private ContainerNodeSpec lastNodeSpec = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final String hostName,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Runnable aclMaintainer,
            final Environment environment,
            final Clock clock,
            final Duration timeBetweenEachConverge) {
        this.containerName = ContainerName.fromHostname(hostName);
        this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
        this.hostname = hostName;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.environment = environment;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        // Thread is created here but only started by start().
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) tick();
        });
        this.loopThread.setName("tick-" + hostname);
    }

    /**
     * Requests the agent to freeze/unfreeze and wakes the tick loop.
     *
     * @return true if the agent has already reached the requested frozen state
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    /** Logs and records a timestamped message in the capped debug-message history. */
    private void addDebugMessage(String message) {
        synchronized (debugMessages) {
            while (debugMessages.size() > 1000) {
                debugMessages.pop();
            }
            logger.debug(message);
            debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
        }
    }

    /** Returns a snapshot of the agent's internal state for diagnostics. */
    @Override
    public Map<String, Object> debugInfo() {
        Map<String, Object> debug = new LinkedHashMap<>();
        debug.put("Hostname", hostname);
        debug.put("isFrozen", isFrozen);
        debug.put("wantFrozen", wantFrozen);
        debug.put("terminated", terminated);
        debug.put("workToDoNow", workToDoNow);
        synchronized (debugMessages) {
            debug.put("History", new LinkedList<>(debugMessages));
        }
        // NOTE(review): lastNodeSpec may still be null before the first converge —
        // this would NPE; confirm debugInfo() is never called that early.
        debug.put("Node repo state", lastNodeSpec.nodeState.name());
        return debug;
    }

    /** Starts the tick loop and sets up the in-container service restarter helper. */
    @Override
    public void start() {
        String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms";
        logger.info(message);
        addDebugMessage(message);
        loopThread.start();
        // Restarts a named service inside the container ("service <name> restart" as root).
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        containerName, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    logger.error("Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                logger.error("Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Terminates the agent: stops the scheduler, marks terminated, and blocks until both
     * the tick thread and the filebeat-restarter scheduler have fully shut down.
     *
     * @throws RuntimeException if called more than once
     */
    @Override
    public void stop() {
        addDebugMessage("Stopping");
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        // Loop to tolerate spurious InterruptedException while joining.
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        logger.info("Stopped");
    }

    /** Runs the optional node resume program once per container start. */
    private void runLocalResumeScriptIfNeeded() {
        if (!resumeScriptRun) {
            addDebugMessage("Starting optional node program resume command");
            dockerOperations.resumeNode(containerName);
            resumeScriptRun = true;
        }
    }

    /**
     * Builds the node attributes to report for the current state and publishes them if
     * changed. Image/version are reported as empty when the container is known absent.
     */
    private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
        final NodeAttributes nodeAttributes = new NodeAttributes()
                .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
                .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
                .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
                .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
        publishStateToNodeRepoIfChanged(nodeAttributes);
    }

    /** Pushes attributes to the node repository only when they differ from the last publish. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
        if (!currentAttributes.equals(lastAttributesSet)) {
            logger.info("Publishing new set of attributes to node repo: "
                    + lastAttributesSet + " -> " + currentAttributes);
            addDebugMessage("Publishing new set of attributes to node repo: {"
                    + lastAttributesSet + "} -> {" + currentAttributes + "}");
            nodeRepository.updateNodeAttributes(hostname, currentAttributes);
            lastAttributesSet = currentAttributes;
        }
    }

    /**
     * Starts the container and resets per-container state: fresh CPU metrics baseline,
     * daily filebeat restart schedule, metrics/filebeat config, resume-script flag.
     */
    private void startContainer(ContainerNodeSpec nodeSpec) {
        aclMaintainer.run();
        dockerOperations.startContainer(containerName, nodeSpec);
        lastCpuMetric = new CpuUsageReporter();

        currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(
                () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
        storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        storageMaintainer.writeFilebeatConfig(containerName, nodeSpec);

        resumeScriptRun = false;
        containerState = UNKNOWN;
        logger.info("Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if required; otherwise restarts its services if the restart
     * generation was bumped. Returns the (possibly removed, hence empty) container.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
                .map(container -> {
                    shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                        logger.info("Will restart services for container " + container + ": " + restartReason);
                        restartServices(nodeSpec, container);
                    });
                    return container;
                });
    }

    /** Returns the reason services must be restarted (wanted restart generation bumped), if any. */
    private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
        if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
        if (!nodeSpec.currentRestartGeneration.isPresent() ||
                nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
        }
        return Optional.empty();
    }

    /** Restarts Vespa in a running, active container, after Orchestrator approves suspension. */
    private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
        if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
            ContainerName containerName = existingContainer.name;
            logger.info("Restarting services for " + containerName);
            // Since we are restarting the services we need to suspend the node.
            orchestratorSuspendNode();
            dockerOperations.restartVespaOnNode(containerName);
        }
    }

    /** Suspends (best effort) and stops all services running in the container. */
    @Override
    public void stopServices() {
        logger.info("Stopping services for " + containerName);
        dockerOperations.trySuspendNode(containerName);
        dockerOperations.stopServicesOnNode(containerName);
    }

    /**
     * Returns the reason the existing container should be removed, if any: node state says
     * so, wanted image changed, container not running, or resource allocation changed.
     */
    private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
        final Node.State nodeState = nodeSpec.nodeState;
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }
        ContainerResources wantedContainerResources = ContainerResources.from(
                nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb);
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        return Optional.empty();
    }

    /**
     * Removes the container when warranted (suspending the node first if active and
     * stopping services best-effort), cancels the filebeat restarter, and marks the
     * container ABSENT. Returns empty if removed, otherwise the container unchanged.
     */
    private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
        if (removeReason.isPresent()) {
            logger.info("Will remove container " + existingContainer + ": " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (nodeSpec.nodeState == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    stopServices();
                } catch (Exception e) {
                    // Best effort — removal proceeds even if the services fail to stop.
                    logger.info("Failed stopping services, ignoring", e);
                }
            }
            if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
            dockerOperations.removeContainer(existingContainer, nodeSpec);
            containerState = ABSENT;
            logger.info("Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Kicks off an async pull of the wanted image when it differs from the current one. */
    private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
        if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;

        if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) {
            imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        } else if (imageBeingDownloaded != null) {
            // Pull no longer needed — download completed.
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the tick loop immediately instead of waiting for the next interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                addDebugMessage("Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One tick-loop iteration: waits out the remainder of the converge interval (or until
     * signalled), syncs the frozen flag, then runs converge() unless frozen. Exception
     * policy: Orchestrator denials are logged at info; DockerException resets
     * containerState to UNKNOWN; other exceptions are counted and swallowed; any
     * Throwable takes the process down.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        logger.error("Interrupted, but ignoring this: " + hostname);
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        if (isFrozenCopy) {
            addDebugMessage("tick: isFrozen");
        } else {
            try {
                converge();
            } catch (OrchestratorException e) {
                logger.info(e.getMessage());
                addDebugMessage(e.getMessage());
            } catch (DockerException e) {
                numberOfUnhandledException++;
                containerState = UNKNOWN;
                logger.error("Caught a DockerException, resetting containerState to " + containerState, e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                logger.error("Unhandled exception, ignoring.", e);
                addDebugMessage(e.getMessage());
            } catch (Throwable t) {
                logger.error("Unhandled throwable, taking down system.", t);
                System.exit(234);
            }
        }
    }

    /**
     * Collects CPU/memory/disk/network metrics from the Docker stats API for the current
     * container and pushes them into the container's metrics agent. No-op if no spec has
     * been loaded yet or the container is known absent.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final ContainerNodeSpec nodeSpec = lastNodeSpec;
        if (nodeSpec == null || containerState == ABSENT) return;

        Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", hostname)
                .add("role", "tenants")
                .add("state", nodeSpec.nodeState.toString())
                .add("parentHostname", environment.getParentHostHostname());
        Dimensions dimensions = dimensionsBuilder.build();

        Docker.ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // Raw counters out of the Docker stats JSON (cumulative since container start).
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName);

        // Convert cumulative counters to deltas since the previous call, then scale CPU
        // utilization to the share of cores allocated to this node.
        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
        final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        // Memory "used" excludes the page cache.
        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        // One metric set per network interface.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    /** Serializes the metrics and delivers them to the in-container metrics RPC endpoint. */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            // Invoke setExtraMetrics on the metrics proxy inside the container, 5s timeout.
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091",
                    "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            logger.warning("Unable to push metrics to container: " + containerName, e);
        }
    }

    /**
     * Asks the docker daemon for the container unless it is known ABSENT; caches the
     * ABSENT observation so we avoid repeated daemon round-trips.
     */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(containerName);
        if (!container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public String getHostname() {
        return hostname;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count since the previous call, and resets it. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks deltas of cumulative CPU counters between successive metric collections. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        // First call yields deltaSystemUsage == 0, making the ratios below NaN (warm-up).
        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Same as {@link #getCpuUsageRatio()} but for kernel-mode CPU time only. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the Orchestrator for permission to suspend this node (throws if denied). */
    private void orchestratorSuspendNode() {
        logger.info("Ask Orchestrator for permission to suspend node " + hostname);
        orchestrator.suspend(hostname);
    }
}
is there a time window where node is in ready before it's removed?
void converge() { final Optional<ContainerNodeSpec> nodeSpecOptional = nodeRepository.getContainerNodeSpec(hostname); if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return; final ContainerNodeSpec nodeSpec = nodeSpecOptional.orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname))); expectNodeNotInNodeRepo = false; Optional<Container> container = getContainer(); if (!nodeSpec.equals(lastNodeSpec)) { addDebugMessage("Loading new node spec: " + nodeSpec.toString()); lastNodeSpec = nodeSpec; if (container.isPresent()) { storageMaintainer.writeMetricsConfig(containerName, nodeSpec); } } switch (nodeSpec.nodeState) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case active: storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); storageMaintainer.getDiskUsageFor(containerName) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName)); scheduleDownLoadIfNeeded(nodeSpec); if (isDownloadingImage()) { addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(nodeSpec, container); if (! 
container.isPresent()) { storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); startContainer(nodeSpec); } runLocalResumeScriptIfNeeded(); updateNodeRepoWithCurrentAttributes(nodeSpec); logger.info("Call resume against Orchestrator"); orchestrator.resume(hostname); break; case inactive: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case provisioned: nodeRepository.markAsDirty(hostname); break; case dirty: removeContainerIfNeededUpdateContainerState(nodeSpec, container); logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready"); storageMaintainer.cleanupNodeStorage(containerName, nodeSpec); updateNodeRepoWithCurrentAttributes(nodeSpec); nodeRepository.markNodeAvailableForNewAllocation(hostname); expectNodeNotInNodeRepo = true; break; default: throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name()); } }
expectNodeNotInNodeRepo = true;
void converge() { final Optional<ContainerNodeSpec> nodeSpecOptional = nodeRepository.getContainerNodeSpec(hostname); if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return; final ContainerNodeSpec nodeSpec = nodeSpecOptional.orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname))); expectNodeNotInNodeRepo = false; Optional<Container> container = getContainer(); if (!nodeSpec.equals(lastNodeSpec)) { addDebugMessage("Loading new node spec: " + nodeSpec.toString()); lastNodeSpec = nodeSpec; if (container.isPresent()) { storageMaintainer.writeMetricsConfig(containerName, nodeSpec); } } switch (nodeSpec.nodeState) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case active: storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); storageMaintainer.getDiskUsageFor(containerName) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName)); scheduleDownLoadIfNeeded(nodeSpec); if (isDownloadingImage()) { addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(nodeSpec, container); if (! 
container.isPresent()) { storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); startContainer(nodeSpec); } runLocalResumeScriptIfNeeded(); updateNodeRepoWithCurrentAttributes(nodeSpec); logger.info("Call resume against Orchestrator"); orchestrator.resume(hostname); break; case inactive: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case provisioned: nodeRepository.markAsDirty(hostname); break; case dirty: removeContainerIfNeededUpdateContainerState(nodeSpec, container); logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready"); storageMaintainer.cleanupNodeStorage(containerName, nodeSpec); updateNodeRepoWithCurrentAttributes(nodeSpec); nodeRepository.markNodeAvailableForNewAllocation(hostname); expectNodeNotInNodeRepo = true; break; default: throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name()); } }
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private boolean expectNodeNotInNodeRepo = false; private final Object monitor = new Object(); private final PrefixLogger logger; private DockerImage imageBeingDownloaded = null; private final ContainerName containerName; private final String hostname; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Runnable aclMaintainer; private final Environment environment; private final Clock clock; private final Duration timeBetweenEachConverge; private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); private final LinkedList<String> debugMessages = new LinkedList<>(); private int numberOfUnhandledException = 0; private Instant lastConverge; private final Thread loopThread; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Future<?> currentFilebeatRestarter; private boolean resumeScriptRun = false; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeAttributes lastAttributesSet = null; private ContainerNodeSpec lastNodeSpec = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final String hostName, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final Runnable aclMaintainer, final Environment environment, final Clock clock, final Duration timeBetweenEachConverge) { this.containerName = ContainerName.fromHostname(hostName); this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName); this.hostname = hostName; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.aclMaintainer = aclMaintainer; this.environment = environment; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.loopThread = new Thread(() -> { while (!terminated.get()) tick(); }); this.loopThread.setName("tick-" + hostname); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; addDebugMessage(wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } private void addDebugMessage(String message) { synchronized (debugMessages) { while (debugMessages.size() > 1000) { debugMessages.pop(); } logger.debug(message); debugMessages.add("[" + sdf.format(new Date()) + "] " + message); } } @Override public Map<String, Object> debugInfo() { Map<String, Object> debug = new LinkedHashMap<>(); debug.put("Hostname", hostname); debug.put("isFrozen", isFrozen); debug.put("wantFrozen", wantFrozen); debug.put("terminated", terminated); debug.put("workToDoNow", workToDoNow); synchronized (debugMessages) { debug.put("History", new LinkedList<>(debugMessages)); } debug.put("Node repo state", lastNodeSpec.nodeState.name()); return debug; } @Override public void start() { String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"; logger.info(message); addDebugMessage(message); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( containerName, "service", service, "restart"); if (!processResult.isSuccess()) { logger.error("Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { logger.error("Failed to restart service " + service, e); } }; } @Override public void stop() { addDebugMessage("Stopping"); filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); logger.info("Stopped"); } private void runLocalResumeScriptIfNeeded() { if (! 
resumeScriptRun) { addDebugMessage("Starting optional node program resume command"); dockerOperations.resumeNode(containerName); resumeScriptRun = true; } } private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) { final NodeAttributes nodeAttributes = new NodeAttributes() .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null)) .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L)) .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage(""))) .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse("")); publishStateToNodeRepoIfChanged(nodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) { if (!currentAttributes.equals(lastAttributesSet)) { logger.info("Publishing new set of attributes to node repo: " + lastAttributesSet + " -> " + currentAttributes); addDebugMessage("Publishing new set of attributes to node repo: {" + lastAttributesSet + "} -> {" + currentAttributes + "}"); nodeRepository.updateNodeAttributes(hostname, currentAttributes); lastAttributesSet = currentAttributes; } } private void startContainer(ContainerNodeSpec nodeSpec) { aclMaintainer.run(); dockerOperations.startContainer(containerName, nodeSpec); lastCpuMetric = new CpuUsageReporter(); currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS); storageMaintainer.writeMetricsConfig(containerName, nodeSpec); storageMaintainer.writeFilebeatConfig(containerName, nodeSpec); resumeScriptRun = false; containerState = UNKNOWN; logger.info("Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(nodeSpec, container)) 
.map(container -> { shouldRestartServices(nodeSpec).ifPresent(restartReason -> { logger.info("Will restart services for container " + container + ": " + restartReason); restartServices(nodeSpec, container); }); return container; }); } private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) { if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty(); if (!nodeSpec.currentRestartGeneration.isPresent() || nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get()); } return Optional.empty(); } private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) { if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) { ContainerName containerName = existingContainer.name; logger.info("Restarting services for " + containerName); orchestratorSuspendNode(); dockerOperations.restartVespaOnNode(containerName); } } @Override public void stopServices() { logger.info("Stopping services for " + containerName); dockerOperations.trySuspendNode(containerName); dockerOperations.stopServicesOnNode(containerName); } private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) { final Node.State nodeState = nodeSpec.nodeState; if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer + " -> " + nodeSpec.wantedDockerImage.get()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources 
wantedContainerResources = ContainerResources.from( nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + wantedContainerResources + ", actual: " + existingContainer.resources); } return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer); if (removeReason.isPresent()) { logger.info("Will remove container " + existingContainer + ": " + removeReason.get()); if (existingContainer.state.isRunning()) { if (nodeSpec.nodeState == Node.State.active) { orchestratorSuspendNode(); } try { stopServices(); } catch (Exception e) { logger.info("Failed stopping services, ignoring", e); } } if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true); dockerOperations.removeContainer(existingContainer, nodeSpec); containerState = ABSENT; logger.info("Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) { if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return; if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) { imageBeingDownloaded = nodeSpec.wantedDockerImage.get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; addDebugMessage("Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) { long remainder = timeBetweenEachConverge .minus(Duration.between(lastConverge, clock.instant())) .toMillis(); if (remainder > 0) { try { 
monitor.wait(remainder); } catch (InterruptedException e) { logger.error("Interrupted, but ignoring this: " + hostname); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } if (isFrozenCopy) { addDebugMessage("tick: isFrozen"); } else { try { converge(); } catch (OrchestratorException e) { logger.info(e.getMessage()); addDebugMessage(e.getMessage()); } catch (DockerException e) { numberOfUnhandledException++; containerState = UNKNOWN; logger.error("Caught a DockerException, resetting containerState to " + containerState, e); } catch (Exception e) { numberOfUnhandledException++; logger.error("Unhandled exception, ignoring.", e); addDebugMessage(e.getMessage()); } catch (Throwable t) { logger.error("Unhandled throwable, taking down system.", t); System.exit(234); } } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { final ContainerNodeSpec nodeSpec = lastNodeSpec; if (nodeSpec == null || containerState == ABSENT) return; Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", hostname) .add("role", "tenants") .add("state", nodeSpec.nodeState.toString()) .add("parentHostname", environment.getParentHostHostname()); Dimensions dimensions = dimensionsBuilder.build(); Docker.ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) 
stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) .withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, 
interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); } private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { logger.warning("Unable to push metrics to container: " + containerName, e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = dockerOperations.getContainer(containerName); if (! 
container.isPresent()) containerState = ABSENT; return container; } @Override public String getHostname() { return hostname; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { logger.info("Ask Orchestrator for permission to suspend node " + hostname); orchestrator.suspend(hostname); } }
/**
 * Agent responsible for a single node: it periodically reconciles the local Docker container
 * with the state wanted by the node repository, and reports current attributes back.
 *
 * <p>A dedicated thread runs the {@link #tick()} loop. The loop wakes up every
 * {@code timeBetweenEachConverge}, or immediately when {@link #signalWorkToBeDone()} is called.
 * All loop-coordination flags ({@code isFrozen}, {@code wantFrozen}, {@code workToDoNow}) are
 * guarded by {@code monitor}; {@code debugMessages} (and the non-thread-safe {@code sdf}) are
 * guarded by {@code debugMessages}.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Loop coordination state; all guarded by 'monitor'.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;

    // When true, the node has (presumably) just been deleted from the node repository by this
    // agent, so a "node not found" answer from the node repo is expected and not an error.
    private boolean expectNodeNotInNodeRepo = false;

    private final Object monitor = new Object();

    private final PrefixLogger logger;
    private DockerImage imageBeingDownloaded = null;

    private final ContainerName containerName;
    private final String hostname;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Runnable aclMaintainer;
    private final Environment environment;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;

    // SimpleDateFormat is not thread-safe; only accessed while synchronized on 'debugMessages'.
    private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    // Bounded in-memory history (max ~1000 entries) exposed via debugInfo().
    private final LinkedList<String> debugMessages = new LinkedList<>();
    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Future<?> currentFilebeatRestarter;
    private boolean resumeScriptRun = false;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        UNKNOWN
    }
    private ContainerState containerState = UNKNOWN;

    // Last attributes successfully published to the node repo; used to avoid redundant updates.
    private NodeAttributes lastAttributesSet = null;
    // Last node spec fetched from the node repo; null until the first successful converge.
    private ContainerNodeSpec lastNodeSpec = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final String hostName,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Runnable aclMaintainer,
            final Environment environment,
            final Clock clock,
            final Duration timeBetweenEachConverge) {
        this.containerName = ContainerName.fromHostname(hostName);
        this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
        this.hostname = hostName;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.environment = environment;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        // The loop thread is created here but only started by start().
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) tick();
        });
        this.loopThread.setName("tick-" + hostname);
    }

    /**
     * Requests the agent to freeze (stop converging) or unfreeze.
     *
     * @return true if the agent has already reached the requested frozen state
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    /** Logs the message at debug level and appends it (timestamped) to the bounded history. */
    private void addDebugMessage(String message) {
        synchronized (debugMessages) {
            while (debugMessages.size() > 1000) {
                debugMessages.pop();
            }
            logger.debug(message);
            debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
        }
    }

    /** Returns a snapshot of the agent's internal state for debugging/inspection. */
    @Override
    public Map<String, Object> debugInfo() {
        Map<String, Object> debug = new LinkedHashMap<>();
        debug.put("Hostname", hostname);
        debug.put("isFrozen", isFrozen);
        debug.put("wantFrozen", wantFrozen);
        debug.put("terminated", terminated);
        debug.put("workToDoNow", workToDoNow);
        synchronized (debugMessages) {
            debug.put("History", new LinkedList<>(debugMessages));
        }
        // Bug fix: lastNodeSpec is null until the first node spec has been loaded; the
        // previous code dereferenced it unconditionally and threw NullPointerException.
        debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
        return debug;
    }

    /** Starts the converge loop thread and prepares the service-restart callback. */
    @Override
    public void start() {
        String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms";
        logger.info(message);
        addDebugMessage(message);
        loopThread.start();
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        containerName, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    logger.error("Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                logger.error("Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Stops the agent: shuts down the filebeat restarter and the converge loop, blocking until
     * both have terminated.
     *
     * @throws RuntimeException if the agent was already stopped
     */
    @Override
    public void stop() {
        addDebugMessage("Stopping");
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        // Loop until both the converge thread and the scheduler are really gone, even if we
        // are interrupted while waiting.
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        logger.info("Stopped");
    }

    /** Runs the optional resume program in the container once per container start. */
    private void runLocalResumeScriptIfNeeded() {
        if (!resumeScriptRun) {
            addDebugMessage("Starting optional node program resume command");
            dockerOperations.resumeNode(containerName);
            resumeScriptRun = true;
        }
    }

    /** Reports the wanted generations/image/version back to the node repo as current attributes. */
    private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
        final NodeAttributes nodeAttributes = new NodeAttributes()
                .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
                .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
                // Only report image/version as current while a container may exist.
                .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
                .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
        publishStateToNodeRepoIfChanged(nodeAttributes);
    }

    /** Pushes the attributes to the node repo only if they differ from the last published set. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
        if (!currentAttributes.equals(lastAttributesSet)) {
            logger.info("Publishing new set of attributes to node repo: "
                    + lastAttributesSet + " -> " + currentAttributes);
            addDebugMessage("Publishing new set of attributes to node repo: {"
                    + lastAttributesSet + "} -> {" + currentAttributes + "}");
            nodeRepository.updateNodeAttributes(hostname, currentAttributes);
            lastAttributesSet = currentAttributes;
        }
    }

    /** Starts a container for the node and (re)initializes per-container state and configs. */
    private void startContainer(ContainerNodeSpec nodeSpec) {
        aclMaintainer.run();
        dockerOperations.startContainer(containerName, nodeSpec);
        lastCpuMetric = new CpuUsageReporter();

        currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(
                () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
        storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        storageMaintainer.writeFilebeatConfig(containerName, nodeSpec);

        resumeScriptRun = false;
        containerState = UNKNOWN;
        logger.info("Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if required by the spec; if it survives, restarts its services
     * when the restart generation has been bumped.
     *
     * @return the container if it is (still) present, empty otherwise
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
                .map(container -> {
                    shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                        logger.info("Will restart services for container " + container + ": " + restartReason);
                        restartServices(nodeSpec, container);
                    });
                    return container;
                });
    }

    /** Returns the reason services should be restarted, or empty if no restart is needed. */
    private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
        if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();

        if (!nodeSpec.currentRestartGeneration.isPresent() ||
                nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
            // Bug fix: the previous code called currentRestartGeneration.get() here, which
            // throws NoSuchElementException exactly when the first half of the condition
            // (generation absent) let us into this branch. Use orElse(null) for the message.
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + nodeSpec.currentRestartGeneration.orElse(null) + " -> "
                    + nodeSpec.wantedRestartGeneration.get());
        }
        return Optional.empty();
    }

    /** Suspends the node via the orchestrator and restarts Vespa inside the running container. */
    private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
        if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
            ContainerName containerName = existingContainer.name;
            logger.info("Restarting services for " + containerName);
            // Since we are restarting the services we need to suspend the node first.
            orchestratorSuspendNode();
            dockerOperations.restartVespaOnNode(containerName);
        }
    }

    /** Suspends and then stops all services running in the container. */
    @Override
    public void stopServices() {
        logger.info("Stopping services for " + containerName);
        dockerOperations.trySuspendNode(containerName);
        dockerOperations.stopServicesOnNode(containerName);
    }

    /** Returns the reason the container should be removed, or empty if it should be kept. */
    private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
        final Node.State nodeState = nodeSpec.nodeState;
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        ContainerResources wantedContainerResources = ContainerResources.from(
                nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb);
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        return Optional.empty();
    }

    /**
     * Removes the container if {@link #shouldRemoveContainer} says so, stopping services
     * (and suspending the node if it is active) first.
     *
     * @return the container if it was kept, empty if it was removed
     */
    private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
        if (removeReason.isPresent()) {
            logger.info("Will remove container " + existingContainer + ": " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (nodeSpec.nodeState == Node.State.active) {
                    orchestratorSuspendNode();
                }
                // Best effort: removal proceeds even if stopping services fails.
                try {
                    stopServices();
                } catch (Exception e) {
                    logger.info("Failed stopping services, ignoring", e);
                }
            }
            if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
            dockerOperations.removeContainer(existingContainer, nodeSpec);
            containerState = ABSENT;
            logger.info("Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Kicks off an async pull of the wanted image if it differs from the current one. */
    private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
        if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;

        // NOTE(review): wantedDockerImage.get() assumes the wanted image is present whenever it
        // differs from the current one - verify callers guarantee this for relevant node states.
        if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) {
            imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
            imageBeingDownloaded = null;
        }
    }

    /** Wakes up the tick loop so the next converge happens immediately. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                addDebugMessage("Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the agent loop: waits until work is due (or signaled), applies any
     * pending freeze/unfreeze, then converges unless frozen. Exceptions from converge() are
     * contained here so the loop survives; any non-Exception Throwable takes the process down.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            // Sleep until the converge interval has elapsed, unless signaled earlier.
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        logger.error("Interrupted, but ignoring this: " + hostname);
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        if (isFrozenCopy) {
            addDebugMessage("tick: isFrozen");
        } else {
            try {
                converge();
            } catch (OrchestratorException e) {
                logger.info(e.getMessage());
                addDebugMessage(e.getMessage());
            } catch (DockerException e) {
                numberOfUnhandledException++;
                // Docker may have left the container in an unknown state; re-query next tick.
                containerState = UNKNOWN;
                logger.error("Caught a DockerException, resetting containerState to " + containerState, e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                logger.error("Unhandled exception, ignoring.", e);
                addDebugMessage(e.getMessage());
            } catch (Throwable t) {
                logger.error("Unhandled throwable, taking down system.", t);
                System.exit(234);
            }
        }
    }

    /**
     * Collects CPU/memory/disk/network statistics for the container from the Docker daemon
     * and pushes them to the container's metrics receiver. No-op when there is no container
     * or no node spec has been loaded yet.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final ContainerNodeSpec nodeSpec = lastNodeSpec;
        if (nodeSpec == null || containerState == ABSENT) return;

        Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", hostname)
                .add("role", "tenants")
                .add("state", nodeSpec.nodeState.toString())
                .add("parentHostname", environment.getParentHostHostname());
        Dimensions dimensions = dimensionsBuilder.build();

        Docker.ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // Raw docker stats maps; the keys below follow the Docker stats API naming.
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // Scale CPU usage by the share of host cores allocated to this node.
        final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        // Memory "used" excludes the page cache.
        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);
        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        // One metrics packet per network interface.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    /** Serializes the metrics and delivers them to the container's metrics RPC endpoint. */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091",
                    "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            logger.warning("Unable to push metrics to container: " + containerName, e);
        }
    }

    /**
     * Asks the docker daemon for the container, updating {@code containerState} to ABSENT if
     * it is gone. Skips the daemon call entirely when the container is known to be absent.
     */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(containerName);
        if (!container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public String getHostname() {
        return hostname;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count since the last call, and resets it to zero. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks deltas of cumulative CPU counters between successive metric collections. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        /** Records new cumulative counters and computes their deltas since the previous call. */
        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // First call: report a zero system delta so the ratios below yield NaN, not garbage.
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Same as {@link #getCpuUsageRatio()}, but for kernel-mode CPU time only. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the orchestrator for permission to suspend this node (throws if denied). */
    private void orchestratorSuspendNode() {
        logger.info("Ask Orchestrator for permission to suspend node " + hostname);
        orchestrator.suspend(hostname);
    }
}
No — the HTTP request that marks the node available for new allocation immediately deletes it from the node repository: https://github.com/vespa-engine/vespa/blob/13b092d7ccb756bad38b7ee26fb781ace108e51c/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java#L496
/**
 * Single convergence pass: fetches the wanted state for this node from the node repository
 * and drives the local container (and the attributes reported back to the node repo)
 * towards it, based on the node's state.
 */
void converge() {
    final Optional<ContainerNodeSpec> nodeSpecOptional = nodeRepository.getContainerNodeSpec(hostname);

    // If this agent just deleted the node from the node repo itself (dirty handling below),
    // its absence is expected - do nothing until it reappears.
    if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return;

    final ContainerNodeSpec nodeSpec = nodeSpecOptional.orElseThrow(() ->
            new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
    expectNodeNotInNodeRepo = false;

    Optional<Container> container = getContainer();
    if (!nodeSpec.equals(lastNodeSpec)) {
        addDebugMessage("Loading new node spec: " + nodeSpec.toString());
        lastNodeSpec = nodeSpec;
        // The spec changed while a container exists: refresh its metrics config to match.
        if (container.isPresent()) {
            storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        }
    }

    switch (nodeSpec.nodeState) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should run in these states; remove it if present and report back.
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);

            // Trigger old-file cleanup when disk utilization reaches 80% of the allocation.
            storageMaintainer.getDiskUsageFor(containerName)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb)
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName));

            scheduleDownLoadIfNeeded(nodeSpec);
            // Wait for the wanted image to finish downloading before (re)starting the container.
            if (isDownloadingImage()) {
                addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }

            container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            if (!container.isPresent()) {
                storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                startContainer(nodeSpec);
            }

            runLocalResumeScriptIfNeeded();
            // Report back before resuming, so the node repo sees the converged state first.
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            logger.info("Call resume against Orchestrator");
            orchestrator.resume(hostname);
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case provisioned:
            nodeRepository.markAsDirty(hostname);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
            storageMaintainer.cleanupNodeStorage(containerName, nodeSpec);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            // Marking the node available deletes it from the node repo, so remember that its
            // absence is expected on the next pass.
            nodeRepository.markNodeAvailableForNewAllocation(hostname);
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
    }
}
expectNodeNotInNodeRepo = true;
/**
 * Single convergence pass: fetches the wanted state for this node from the node repository
 * and drives the local container (and the attributes reported back to the node repo)
 * towards it, based on the node's state.
 */
void converge() {
    final Optional<ContainerNodeSpec> nodeSpecOptional = nodeRepository.getContainerNodeSpec(hostname);

    // If this agent just deleted the node from the node repo itself (dirty handling below),
    // its absence is expected - do nothing until it reappears.
    if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return;

    final ContainerNodeSpec nodeSpec = nodeSpecOptional.orElseThrow(() ->
            new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
    expectNodeNotInNodeRepo = false;

    Optional<Container> container = getContainer();
    if (!nodeSpec.equals(lastNodeSpec)) {
        addDebugMessage("Loading new node spec: " + nodeSpec.toString());
        lastNodeSpec = nodeSpec;
        // The spec changed while a container exists: refresh its metrics config to match.
        if (container.isPresent()) {
            storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        }
    }

    switch (nodeSpec.nodeState) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should run in these states; remove it if present and report back.
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);

            // Trigger old-file cleanup when disk utilization reaches 80% of the allocation.
            storageMaintainer.getDiskUsageFor(containerName)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb)
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName));

            scheduleDownLoadIfNeeded(nodeSpec);
            // Wait for the wanted image to finish downloading before (re)starting the container.
            if (isDownloadingImage()) {
                addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }

            container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            if (!container.isPresent()) {
                storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                startContainer(nodeSpec);
            }

            runLocalResumeScriptIfNeeded();
            // Report back before resuming, so the node repo sees the converged state first.
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            logger.info("Call resume against Orchestrator");
            orchestrator.resume(hostname);
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case provisioned:
            nodeRepository.markAsDirty(hostname);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
            storageMaintainer.cleanupNodeStorage(containerName, nodeSpec);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            // Marking the node available deletes it from the node repo, so remember that its
            // absence is expected on the next pass.
            nodeRepository.markNodeAvailableForNewAllocation(hostname);
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
    }
}
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private boolean expectNodeNotInNodeRepo = false; private final Object monitor = new Object(); private final PrefixLogger logger; private DockerImage imageBeingDownloaded = null; private final ContainerName containerName; private final String hostname; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Runnable aclMaintainer; private final Environment environment; private final Clock clock; private final Duration timeBetweenEachConverge; private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); private final LinkedList<String> debugMessages = new LinkedList<>(); private int numberOfUnhandledException = 0; private Instant lastConverge; private final Thread loopThread; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Future<?> currentFilebeatRestarter; private boolean resumeScriptRun = false; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeAttributes lastAttributesSet = null; private ContainerNodeSpec lastNodeSpec = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final String hostName, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final Runnable aclMaintainer, final Environment environment, final Clock clock, final Duration timeBetweenEachConverge) { this.containerName = ContainerName.fromHostname(hostName); this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName); this.hostname = hostName; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.aclMaintainer = aclMaintainer; this.environment = environment; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.loopThread = new Thread(() -> { while (!terminated.get()) tick(); }); this.loopThread.setName("tick-" + hostname); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; addDebugMessage(wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } private void addDebugMessage(String message) { synchronized (debugMessages) { while (debugMessages.size() > 1000) { debugMessages.pop(); } logger.debug(message); debugMessages.add("[" + sdf.format(new Date()) + "] " + message); } } @Override public Map<String, Object> debugInfo() { Map<String, Object> debug = new LinkedHashMap<>(); debug.put("Hostname", hostname); debug.put("isFrozen", isFrozen); debug.put("wantFrozen", wantFrozen); debug.put("terminated", terminated); debug.put("workToDoNow", workToDoNow); synchronized (debugMessages) { debug.put("History", new LinkedList<>(debugMessages)); } debug.put("Node repo state", lastNodeSpec.nodeState.name()); return debug; } @Override public void start() { String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"; logger.info(message); addDebugMessage(message); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( containerName, "service", service, "restart"); if (!processResult.isSuccess()) { logger.error("Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { logger.error("Failed to restart service " + service, e); } }; } @Override public void stop() { addDebugMessage("Stopping"); filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); logger.info("Stopped"); } private void runLocalResumeScriptIfNeeded() { if (! 
resumeScriptRun) { addDebugMessage("Starting optional node program resume command"); dockerOperations.resumeNode(containerName); resumeScriptRun = true; } } private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) { final NodeAttributes nodeAttributes = new NodeAttributes() .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null)) .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L)) .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage(""))) .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse("")); publishStateToNodeRepoIfChanged(nodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) { if (!currentAttributes.equals(lastAttributesSet)) { logger.info("Publishing new set of attributes to node repo: " + lastAttributesSet + " -> " + currentAttributes); addDebugMessage("Publishing new set of attributes to node repo: {" + lastAttributesSet + "} -> {" + currentAttributes + "}"); nodeRepository.updateNodeAttributes(hostname, currentAttributes); lastAttributesSet = currentAttributes; } } private void startContainer(ContainerNodeSpec nodeSpec) { aclMaintainer.run(); dockerOperations.startContainer(containerName, nodeSpec); lastCpuMetric = new CpuUsageReporter(); currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS); storageMaintainer.writeMetricsConfig(containerName, nodeSpec); storageMaintainer.writeFilebeatConfig(containerName, nodeSpec); resumeScriptRun = false; containerState = UNKNOWN; logger.info("Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(nodeSpec, container)) 
.map(container -> { shouldRestartServices(nodeSpec).ifPresent(restartReason -> { logger.info("Will restart services for container " + container + ": " + restartReason); restartServices(nodeSpec, container); }); return container; }); } private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) { if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty(); if (!nodeSpec.currentRestartGeneration.isPresent() || nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get()); } return Optional.empty(); } private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) { if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) { ContainerName containerName = existingContainer.name; logger.info("Restarting services for " + containerName); orchestratorSuspendNode(); dockerOperations.restartVespaOnNode(containerName); } } @Override public void stopServices() { logger.info("Stopping services for " + containerName); dockerOperations.trySuspendNode(containerName); dockerOperations.stopServicesOnNode(containerName); } private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) { final Node.State nodeState = nodeSpec.nodeState; if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer + " -> " + nodeSpec.wantedDockerImage.get()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources 
wantedContainerResources = ContainerResources.from( nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + wantedContainerResources + ", actual: " + existingContainer.resources); } return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer); if (removeReason.isPresent()) { logger.info("Will remove container " + existingContainer + ": " + removeReason.get()); if (existingContainer.state.isRunning()) { if (nodeSpec.nodeState == Node.State.active) { orchestratorSuspendNode(); } try { stopServices(); } catch (Exception e) { logger.info("Failed stopping services, ignoring", e); } } if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true); dockerOperations.removeContainer(existingContainer, nodeSpec); containerState = ABSENT; logger.info("Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) { if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return; if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) { imageBeingDownloaded = nodeSpec.wantedDockerImage.get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; addDebugMessage("Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) { long remainder = timeBetweenEachConverge .minus(Duration.between(lastConverge, clock.instant())) .toMillis(); if (remainder > 0) { try { 
monitor.wait(remainder); } catch (InterruptedException e) { logger.error("Interrupted, but ignoring this: " + hostname); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } if (isFrozenCopy) { addDebugMessage("tick: isFrozen"); } else { try { converge(); } catch (OrchestratorException e) { logger.info(e.getMessage()); addDebugMessage(e.getMessage()); } catch (DockerException e) { numberOfUnhandledException++; containerState = UNKNOWN; logger.error("Caught a DockerException, resetting containerState to " + containerState, e); } catch (Exception e) { numberOfUnhandledException++; logger.error("Unhandled exception, ignoring.", e); addDebugMessage(e.getMessage()); } catch (Throwable t) { logger.error("Unhandled throwable, taking down system.", t); System.exit(234); } } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { final ContainerNodeSpec nodeSpec = lastNodeSpec; if (nodeSpec == null || containerState == ABSENT) return; Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", hostname) .add("role", "tenants") .add("state", nodeSpec.nodeState.toString()) .add("parentHostname", environment.getParentHostHostname()); Dimensions dimensions = dimensionsBuilder.build(); Docker.ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) 
stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) .withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, 
interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); } private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { logger.warning("Unable to push metrics to container: " + containerName, e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = dockerOperations.getContainer(containerName); if (! 
container.isPresent()) containerState = ABSENT; return container; } @Override public String getHostname() { return hostname; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { logger.info("Ask Orchestrator for permission to suspend node " + hostname); orchestrator.suspend(hostname); } }
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private boolean expectNodeNotInNodeRepo = false; private final Object monitor = new Object(); private final PrefixLogger logger; private DockerImage imageBeingDownloaded = null; private final ContainerName containerName; private final String hostname; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Runnable aclMaintainer; private final Environment environment; private final Clock clock; private final Duration timeBetweenEachConverge; private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); private final LinkedList<String> debugMessages = new LinkedList<>(); private int numberOfUnhandledException = 0; private Instant lastConverge; private final Thread loopThread; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Future<?> currentFilebeatRestarter; private boolean resumeScriptRun = false; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeAttributes lastAttributesSet = null; private ContainerNodeSpec lastNodeSpec = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final String hostName, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final Runnable aclMaintainer, final Environment environment, final Clock clock, final Duration timeBetweenEachConverge) { this.containerName = ContainerName.fromHostname(hostName); this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName); this.hostname = hostName; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.aclMaintainer = aclMaintainer; this.environment = environment; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.loopThread = new Thread(() -> { while (!terminated.get()) tick(); }); this.loopThread.setName("tick-" + hostname); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; addDebugMessage(wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } private void addDebugMessage(String message) { synchronized (debugMessages) { while (debugMessages.size() > 1000) { debugMessages.pop(); } logger.debug(message); debugMessages.add("[" + sdf.format(new Date()) + "] " + message); } } @Override public Map<String, Object> debugInfo() { Map<String, Object> debug = new LinkedHashMap<>(); debug.put("Hostname", hostname); debug.put("isFrozen", isFrozen); debug.put("wantFrozen", wantFrozen); debug.put("terminated", terminated); debug.put("workToDoNow", workToDoNow); synchronized (debugMessages) { debug.put("History", new LinkedList<>(debugMessages)); } debug.put("Node repo state", lastNodeSpec.nodeState.name()); return debug; } @Override public void start() { String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"; logger.info(message); addDebugMessage(message); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( containerName, "service", service, "restart"); if (!processResult.isSuccess()) { logger.error("Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { logger.error("Failed to restart service " + service, e); } }; } @Override public void stop() { addDebugMessage("Stopping"); filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); logger.info("Stopped"); } private void runLocalResumeScriptIfNeeded() { if (! 
resumeScriptRun) { addDebugMessage("Starting optional node program resume command"); dockerOperations.resumeNode(containerName); resumeScriptRun = true; } } private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) { final NodeAttributes nodeAttributes = new NodeAttributes() .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null)) .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L)) .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage(""))) .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse("")); publishStateToNodeRepoIfChanged(nodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) { if (!currentAttributes.equals(lastAttributesSet)) { logger.info("Publishing new set of attributes to node repo: " + lastAttributesSet + " -> " + currentAttributes); addDebugMessage("Publishing new set of attributes to node repo: {" + lastAttributesSet + "} -> {" + currentAttributes + "}"); nodeRepository.updateNodeAttributes(hostname, currentAttributes); lastAttributesSet = currentAttributes; } } private void startContainer(ContainerNodeSpec nodeSpec) { aclMaintainer.run(); dockerOperations.startContainer(containerName, nodeSpec); lastCpuMetric = new CpuUsageReporter(); currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS); storageMaintainer.writeMetricsConfig(containerName, nodeSpec); storageMaintainer.writeFilebeatConfig(containerName, nodeSpec); resumeScriptRun = false; containerState = UNKNOWN; logger.info("Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(nodeSpec, container)) 
.map(container -> { shouldRestartServices(nodeSpec).ifPresent(restartReason -> { logger.info("Will restart services for container " + container + ": " + restartReason); restartServices(nodeSpec, container); }); return container; }); } private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) { if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty(); if (!nodeSpec.currentRestartGeneration.isPresent() || nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get()); } return Optional.empty(); } private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) { if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) { ContainerName containerName = existingContainer.name; logger.info("Restarting services for " + containerName); orchestratorSuspendNode(); dockerOperations.restartVespaOnNode(containerName); } } @Override public void stopServices() { logger.info("Stopping services for " + containerName); dockerOperations.trySuspendNode(containerName); dockerOperations.stopServicesOnNode(containerName); } private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) { final Node.State nodeState = nodeSpec.nodeState; if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer + " -> " + nodeSpec.wantedDockerImage.get()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources 
wantedContainerResources = ContainerResources.from( nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + wantedContainerResources + ", actual: " + existingContainer.resources); } return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer); if (removeReason.isPresent()) { logger.info("Will remove container " + existingContainer + ": " + removeReason.get()); if (existingContainer.state.isRunning()) { if (nodeSpec.nodeState == Node.State.active) { orchestratorSuspendNode(); } try { stopServices(); } catch (Exception e) { logger.info("Failed stopping services, ignoring", e); } } if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true); dockerOperations.removeContainer(existingContainer, nodeSpec); containerState = ABSENT; logger.info("Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) { if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return; if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) { imageBeingDownloaded = nodeSpec.wantedDockerImage.get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; addDebugMessage("Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) { long remainder = timeBetweenEachConverge .minus(Duration.between(lastConverge, clock.instant())) .toMillis(); if (remainder > 0) { try { 
monitor.wait(remainder); } catch (InterruptedException e) { logger.error("Interrupted, but ignoring this: " + hostname); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } if (isFrozenCopy) { addDebugMessage("tick: isFrozen"); } else { try { converge(); } catch (OrchestratorException e) { logger.info(e.getMessage()); addDebugMessage(e.getMessage()); } catch (DockerException e) { numberOfUnhandledException++; containerState = UNKNOWN; logger.error("Caught a DockerException, resetting containerState to " + containerState, e); } catch (Exception e) { numberOfUnhandledException++; logger.error("Unhandled exception, ignoring.", e); addDebugMessage(e.getMessage()); } catch (Throwable t) { logger.error("Unhandled throwable, taking down system.", t); System.exit(234); } } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { final ContainerNodeSpec nodeSpec = lastNodeSpec; if (nodeSpec == null || containerState == ABSENT) return; Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", hostname) .add("role", "tenants") .add("state", nodeSpec.nodeState.toString()) .add("parentHostname", environment.getParentHostHostname()); Dimensions dimensions = dimensionsBuilder.build(); Docker.ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) 
stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) .withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, 
interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); } private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { logger.warning("Unable to push metrics to container: " + containerName, e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = dockerOperations.getContainer(containerName); if (! 
container.isPresent()) containerState = ABSENT; return container; } @Override public String getHostname() { return hostname; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { logger.info("Ask Orchestrator for permission to suspend node " + hostname); orchestrator.suspend(hostname); } }
_Maybe_ INFO, why would there be a warning for something that is expected and not a problem?
void converge() { final Optional<ContainerNodeSpec> nodeSpecOptional = nodeRepository.getContainerNodeSpec(hostname); if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return; final ContainerNodeSpec nodeSpec = nodeSpecOptional.orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname))); expectNodeNotInNodeRepo = false; Optional<Container> container = getContainer(); if (!nodeSpec.equals(lastNodeSpec)) { addDebugMessage("Loading new node spec: " + nodeSpec.toString()); lastNodeSpec = nodeSpec; if (container.isPresent()) { storageMaintainer.writeMetricsConfig(containerName, nodeSpec); } } switch (nodeSpec.nodeState) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case active: storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); storageMaintainer.getDiskUsageFor(containerName) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName)); scheduleDownLoadIfNeeded(nodeSpec); if (isDownloadingImage()) { addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(nodeSpec, container); if (! 
container.isPresent()) { storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); startContainer(nodeSpec); } runLocalResumeScriptIfNeeded(); updateNodeRepoWithCurrentAttributes(nodeSpec); logger.info("Call resume against Orchestrator"); orchestrator.resume(hostname); break; case inactive: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case provisioned: nodeRepository.markAsDirty(hostname); break; case dirty: removeContainerIfNeededUpdateContainerState(nodeSpec, container); logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready"); storageMaintainer.cleanupNodeStorage(containerName, nodeSpec); updateNodeRepoWithCurrentAttributes(nodeSpec); nodeRepository.markNodeAvailableForNewAllocation(hostname); expectNodeNotInNodeRepo = true; break; default: throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name()); } }
if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return;
void converge() { final Optional<ContainerNodeSpec> nodeSpecOptional = nodeRepository.getContainerNodeSpec(hostname); if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return; final ContainerNodeSpec nodeSpec = nodeSpecOptional.orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname))); expectNodeNotInNodeRepo = false; Optional<Container> container = getContainer(); if (!nodeSpec.equals(lastNodeSpec)) { addDebugMessage("Loading new node spec: " + nodeSpec.toString()); lastNodeSpec = nodeSpec; if (container.isPresent()) { storageMaintainer.writeMetricsConfig(containerName, nodeSpec); } } switch (nodeSpec.nodeState) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case active: storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); storageMaintainer.getDiskUsageFor(containerName) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName)); scheduleDownLoadIfNeeded(nodeSpec); if (isDownloadingImage()) { addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(nodeSpec, container); if (! 
container.isPresent()) { storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); startContainer(nodeSpec); } runLocalResumeScriptIfNeeded(); updateNodeRepoWithCurrentAttributes(nodeSpec); logger.info("Call resume against Orchestrator"); orchestrator.resume(hostname); break; case inactive: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case provisioned: nodeRepository.markAsDirty(hostname); break; case dirty: removeContainerIfNeededUpdateContainerState(nodeSpec, container); logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready"); storageMaintainer.cleanupNodeStorage(containerName, nodeSpec); updateNodeRepoWithCurrentAttributes(nodeSpec); nodeRepository.markNodeAvailableForNewAllocation(hostname); expectNodeNotInNodeRepo = true; break; default: throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name()); } }
/**
 * Converges a single node (one Docker container) towards the state wanted by the node repository.
 * A dedicated loop thread runs {@link #tick()} at a fixed interval; all mutable state is either
 * confined to that thread or guarded by {@code monitor}/{@code debugMessages}.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // The agent starts frozen; the first tick() synchronizes isFrozen with wantFrozen.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;
    // Set after marking the node available again: the node may then legitimately be absent
    // from the node repository (read by converge(), defined elsewhere).
    private boolean expectNodeNotInNodeRepo = false;

    // Guards the frozen/work flags and is the wait/notify rendezvous for the loop thread.
    private final Object monitor = new Object();

    private final PrefixLogger logger;
    private DockerImage imageBeingDownloaded = null;

    private final ContainerName containerName;
    private final String hostname;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Runnable aclMaintainer;
    private final Environment environment;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;

    // Only used inside the debugMessages lock, so the non-thread-safe SimpleDateFormat is ok here.
    private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    // Bounded in-memory history (max 1000 entries) exposed through debugInfo().
    private final LinkedList<String> debugMessages = new LinkedList<>();

    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Future<?> currentFilebeatRestarter;
    private boolean resumeScriptRun = false;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        UNKNOWN
    }
    private ContainerState containerState = UNKNOWN;

    private NodeAttributes lastAttributesSet = null;
    // Last spec fetched from the node repo; null until the first successful converge().
    private ContainerNodeSpec lastNodeSpec = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final String hostName,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Runnable aclMaintainer,
            final Environment environment,
            final Clock clock,
            final Duration timeBetweenEachConverge) {
        this.containerName = ContainerName.fromHostname(hostName);
        this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
        this.hostname = hostName;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.environment = environment;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        // Not started here; start() launches the loop.
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) tick();
        });
        this.loopThread.setName("tick-" + hostname);
    }

    /**
     * Requests the agent to (un)freeze and wakes the loop thread.
     *
     * @return true iff the agent has already reached the requested frozen state
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    /** Logs the message at debug level and appends it, timestamped, to the bounded history. */
    private void addDebugMessage(String message) {
        synchronized (debugMessages) {
            while (debugMessages.size() > 1000) {
                debugMessages.pop();
            }
            logger.debug(message);
            debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
        }
    }

    /** Returns a snapshot of the agent's state and message history, for debugging/REST. */
    @Override
    public Map<String, Object> debugInfo() {
        Map<String, Object> debug = new LinkedHashMap<>();
        debug.put("Hostname", hostname);
        debug.put("isFrozen", isFrozen);
        debug.put("wantFrozen", wantFrozen);
        debug.put("terminated", terminated);
        debug.put("workToDoNow", workToDoNow);
        synchronized (debugMessages) {
            debug.put("History", new LinkedList<>(debugMessages));
        }
        // lastNodeSpec is null until the first converge; previously this NPE'd if debugInfo()
        // was called before then.
        debug.put("Node repo state", lastNodeSpec == null ? "UNKNOWN" : lastNodeSpec.nodeState.name());
        return debug;
    }

    /** Starts the converge loop thread and prepares the service-restart callback. */
    @Override
    public void start() {
        String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms";
        logger.info(message);
        addDebugMessage(message);
        loopThread.start();

        // Restarts a service inside the container via "service <name> restart"; failures are
        // logged but never propagated (runs on the filebeatRestarter scheduler).
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        containerName, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    logger.error("Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                logger.error("Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Stops the agent: shuts down the scheduler, terminates the loop thread and blocks until
     * both have finished. May only be called once.
     *
     * @throws RuntimeException if the agent was already stopped
     */
    @Override
    public void stop() {
        addDebugMessage("Stopping");
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        // Keep waiting even if interrupted, until both workers are really gone.
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        logger.info("Stopped");
    }

    /** Runs the optional node resume program once per container start. */
    private void runLocalResumeScriptIfNeeded() {
        if (!resumeScriptRun) {
            addDebugMessage("Starting optional node program resume command");
            dockerOperations.resumeNode(containerName);
            resumeScriptRun = true;
        }
    }

    /**
     * Reports the wanted generations/image/version back to the node repo as the node's current
     * attributes. Image and version are blanked while no container exists.
     */
    private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
        final NodeAttributes nodeAttributes = new NodeAttributes()
                .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
                .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
                .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
                .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
        publishStateToNodeRepoIfChanged(nodeAttributes);
    }

    /** Pushes attributes to the node repo only when they differ from what was last published. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
        if (!currentAttributes.equals(lastAttributesSet)) {
            logger.info("Publishing new set of attributes to node repo: "
                    + lastAttributesSet + " -> " + currentAttributes);
            addDebugMessage("Publishing new set of attributes to node repo: {"
                    + lastAttributesSet + "} -> {" + currentAttributes + "}");
            nodeRepository.updateNodeAttributes(hostname, currentAttributes);
            lastAttributesSet = currentAttributes;
        }
    }

    /** Starts a container for the node and (re)initializes per-container state and configs. */
    private void startContainer(ContainerNodeSpec nodeSpec) {
        aclMaintainer.run();
        dockerOperations.startContainer(containerName, nodeSpec);
        lastCpuMetric = new CpuUsageReporter();

        // Daily filebeat restart; cancelled when the container is removed.
        currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(
                () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
        storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        storageMaintainer.writeFilebeatConfig(containerName, nodeSpec);

        resumeScriptRun = false;
        containerState = UNKNOWN;
        logger.info("Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if it should not be running (returning empty), otherwise restarts
     * its services if the restart generation was bumped and returns it.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
                .map(container -> {
                    shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                        logger.info("Will restart services for container " + container + ": " + restartReason);
                        restartServices(nodeSpec, container);
                    });
                    return container;
                });
    }

    /** Returns a reason to restart services iff the current generation lags the wanted one. */
    private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
        if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();

        if (!nodeSpec.currentRestartGeneration.isPresent() ||
                nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
            // Use orElse(null) for the message: currentRestartGeneration may be absent here,
            // and the previous unconditional get() threw NoSuchElementException in that case.
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + nodeSpec.currentRestartGeneration.orElse(null) + " -> "
                    + nodeSpec.wantedRestartGeneration.get());
        }
        return Optional.empty();
    }

    /** Suspends the node with the Orchestrator, then restarts Vespa inside the container. */
    private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
        if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
            ContainerName containerName = existingContainer.name;
            logger.info("Restarting services for " + containerName);
            // Since we are restarting the services we need to suspend the node.
            orchestratorSuspendNode();
            dockerOperations.restartVespaOnNode(containerName);
        }
    }

    /** Best-effort suspend followed by stopping all services inside the container. */
    @Override
    public void stopServices() {
        logger.info("Stopping services for " + containerName);
        dockerOperations.trySuspendNode(containerName);
        dockerOperations.stopServicesOnNode(containerName);
    }

    /** Returns the reason the existing container must be removed, or empty if it may keep running. */
    private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
        final Node.State nodeState = nodeSpec.nodeState;
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        ContainerResources wantedContainerResources = ContainerResources.from(
                nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb);
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        return Optional.empty();
    }

    /**
     * Removes the container when {@link #shouldRemoveContainer} says so (suspending the node and
     * stopping services first when it is running) and returns empty; otherwise returns it as-is.
     */
    private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
        if (removeReason.isPresent()) {
            logger.info("Will remove container " + existingContainer + ": " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (nodeSpec.nodeState == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    stopServices();
                } catch (Exception e) {
                    // Deliberately best-effort: removal proceeds even if stopping services fails.
                    logger.info("Failed stopping services, ignoring", e);
                }
            }
            if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
            dockerOperations.removeContainer(existingContainer, nodeSpec);
            containerState = ABSENT;
            logger.info("Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Kicks off an async pull of the wanted image if it differs from the current one. */
    private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
        // Also bail out when no wanted image is set: the get() below used to throw
        // NoSuchElementException if current was present but wanted was absent.
        if (!nodeSpec.wantedDockerImage.isPresent()
                || nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;

        if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) {
            imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the loop thread so the next tick runs immediately. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                addDebugMessage("Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the agent loop: sleeps until the next converge is due (or until
     * signalled), synchronizes the frozen flag, and runs converge() unless frozen.
     * All exception handling for converge() lives here.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        logger.error("Interrupted, but ignoring this: " + hostname);
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        if (isFrozenCopy) {
            addDebugMessage("tick: isFrozen");
        } else {
            try {
                converge();
            } catch (OrchestratorException e) {
                // Expected during suspend/resume negotiation; retried next tick.
                logger.info(e.getMessage());
                addDebugMessage(e.getMessage());
            } catch (DockerException e) {
                numberOfUnhandledException++;
                // Docker state is now uncertain; re-query the daemon next tick.
                containerState = UNKNOWN;
                logger.error("Caught a DockerException, resetting containerState to " + containerState, e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                logger.error("Unhandled exception, ignoring.", e);
                addDebugMessage(e.getMessage());
            } catch (Throwable t) {
                logger.error("Unhandled throwable, taking down system.", t);
                System.exit(234);
            }
        }
    }

    /**
     * Samples docker stats for the container and pushes cpu/memory/disk/network metrics
     * into it via vespa-rpc-invoke. No-op before the first converge or when the container
     * is known to be absent.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final ContainerNodeSpec nodeSpec = lastNodeSpec;
        if (nodeSpec == null || containerState == ABSENT) return;

        Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", hostname)
                .add("role", "tenants")
                .add("state", nodeSpec.nodeState.toString())
                .add("parentHostname", environment.getParentHostHostname());
        Dimensions dimensions = dimensionsBuilder.build();

        Docker.ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // Raw docker stats are loosely-typed maps; hence the casts below.
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // CPU usage is scaled to the share of the host's cores allocated to this node.
        final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        // Page-cache bytes are reclaimable and therefore not counted as used.
        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    /** Serializes the metrics as secret-agent reports and injects them via vespa-rpc-invoke. */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091",
                    "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            logger.warning("Unable to push metrics to container: " + containerName, e);
        }
    }

    /**
     * Queries the docker daemon for the container, short-circuiting when it is known ABSENT
     * and recording ABSENT when the daemon reports no container.
     */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(containerName);
        if (!container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public String getHostname() {
        return hostname;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count since the last call and resets it to zero. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks deltas of cumulative CPU counters between successive samples. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // First sample has no baseline, so the system delta is forced to 0
            // (making the ratios NaN below).
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Same as {@link #getCpuUsageRatio()}, but for kernel-mode CPU time only. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the Orchestrator for permission to suspend this node; throws if denied. */
    private void orchestratorSuspendNode() {
        logger.info("Ask Orchestrator for permission to suspend node " + hostname);
        orchestrator.suspend(hostname);
    }
}
/**
 * Converges a single node (one Docker container) towards the state wanted by the node repository.
 * A loop thread runs tick() at a fixed interval; shared flags are guarded by {@code monitor}.
 * NOTE(review): this chunk contains this class text twice — confirm the duplication is a
 * chunking artifact and not two real declarations.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Starts frozen; first tick() aligns isFrozen with wantFrozen (initially false).
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;
    private boolean expectNodeNotInNodeRepo = false;

    // Guards the flags above and is the wait/notify rendezvous for the loop thread.
    private final Object monitor = new Object();

    private final PrefixLogger logger;
    private DockerImage imageBeingDownloaded = null;

    private final ContainerName containerName;
    private final String hostname;

    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Runnable aclMaintainer;
    private final Environment environment;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;

    // Only used while holding the debugMessages lock (SimpleDateFormat is not thread-safe).
    private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    // Bounded (max 1000) history exposed through debugInfo().
    private final LinkedList<String> debugMessages = new LinkedList<>();

    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Future<?> currentFilebeatRestarter;
    private boolean resumeScriptRun = false;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        UNKNOWN
    }
    private ContainerState containerState = UNKNOWN;

    private NodeAttributes lastAttributesSet = null;
    // null until the first spec is fetched in converge().
    private ContainerNodeSpec lastNodeSpec = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final String hostName,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Runnable aclMaintainer,
            final Environment environment,
            final Clock clock,
            final Duration timeBetweenEachConverge) {
        this.containerName = ContainerName.fromHostname(hostName);
        this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
        this.hostname = hostName;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.environment = environment;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        // The loop thread is created here but only started in start().
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) tick();
        });
        this.loopThread.setName("tick-" + hostname);
    }

    /**
     * Requests the agent to (un)freeze and wakes the loop thread.
     * Returns true iff the agent is already in the requested frozen state.
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    // Logs at debug level and appends a timestamped entry to the bounded history.
    private void addDebugMessage(String message) {
        synchronized (debugMessages) {
            while (debugMessages.size() > 1000) {
                debugMessages.pop();
            }
            logger.debug(message);
            debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
        }
    }

    /** Returns a snapshot of the agent's flags and message history for debugging. */
    @Override
    public Map<String, Object> debugInfo() {
        Map<String, Object> debug = new LinkedHashMap<>();
        debug.put("Hostname", hostname);
        debug.put("isFrozen", isFrozen);
        debug.put("wantFrozen", wantFrozen);
        debug.put("terminated", terminated);
        debug.put("workToDoNow", workToDoNow);
        synchronized (debugMessages) {
            debug.put("History", new LinkedList<>(debugMessages));
        }
        // NOTE(review): lastNodeSpec is null until the first converge(); calling debugInfo()
        // before then NPEs here — consider a null guard.
        debug.put("Node repo state", lastNodeSpec.nodeState.name());
        return debug;
    }

    /** Starts the converge loop thread and prepares the service-restart callback. */
    @Override
    public void start() {
        String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms";
        logger.info(message);
        addDebugMessage(message);
        loopThread.start();

        // Restarts a named service inside the container; failures logged, never propagated.
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        containerName, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    logger.error("Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                logger.error("Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Stops the agent and blocks until the loop thread and scheduler have finished.
     * Throws if called more than once.
     */
    @Override
    public void stop() {
        addDebugMessage("Stopping");
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        // Keep waiting even across interrupts until both workers are really gone.
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        logger.info("Stopped");
    }

    // Runs the optional node resume program at most once per container start.
    private void runLocalResumeScriptIfNeeded() {
        if (!resumeScriptRun) {
            addDebugMessage("Starting optional node program resume command");
            dockerOperations.resumeNode(containerName);
            resumeScriptRun = true;
        }
    }

    // Mirrors wanted generations/image/version back to the node repo as current attributes;
    // image and version are blanked while no container exists (containerState == ABSENT).
    private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
        final NodeAttributes nodeAttributes = new NodeAttributes()
                .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
                .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
                .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
                .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
        publishStateToNodeRepoIfChanged(nodeAttributes);
    }

    // Only pushes to the node repo when the attributes differ from the last published set.
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
        if (!currentAttributes.equals(lastAttributesSet)) {
            logger.info("Publishing new set of attributes to node repo: "
                    + lastAttributesSet + " -> " + currentAttributes);
            addDebugMessage("Publishing new set of attributes to node repo: {"
                    + lastAttributesSet + "} -> {" + currentAttributes + "}");
            nodeRepository.updateNodeAttributes(hostname, currentAttributes);
            lastAttributesSet = currentAttributes;
        }
    }

    // Starts a container and (re)initializes per-container state, configs and the daily
    // filebeat-restart schedule.
    private void startContainer(ContainerNodeSpec nodeSpec) {
        aclMaintainer.run();
        dockerOperations.startContainer(containerName, nodeSpec);
        lastCpuMetric = new CpuUsageReporter();

        currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(
                () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
        storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        storageMaintainer.writeFilebeatConfig(containerName, nodeSpec);

        resumeScriptRun = false;
        containerState = UNKNOWN;
        logger.info("Container successfully started, new containerState is " + containerState);
    }

    // Removes the container if it should not run (returns empty); otherwise restarts its
    // services when the restart generation was bumped and returns the container.
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
                .map(container -> {
                    shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                        logger.info("Will restart services for container " + container + ": " + restartReason);
                        restartServices(nodeSpec, container);
                    });
                    return container;
                });
    }

    // Returns a reason to restart iff the current restart generation lags the wanted one.
    private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
        if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();

        if (!nodeSpec.currentRestartGeneration.isPresent() ||
                nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
            // NOTE(review): currentRestartGeneration may be absent on this path, in which
            // case the get() in the message below throws NoSuchElementException — fix by
            // using orElse(null) for the message.
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + nodeSpec.currentRestartGeneration.get() + " -> "
                    + nodeSpec.wantedRestartGeneration.get());
        }
        return Optional.empty();
    }

    // Suspends the node with the Orchestrator, then restarts Vespa inside the container.
    private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
        if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
            ContainerName containerName = existingContainer.name;
            logger.info("Restarting services for " + containerName);
            orchestratorSuspendNode();
            dockerOperations.restartVespaOnNode(containerName);
        }
    }

    /** Best-effort suspend followed by stopping all services inside the container. */
    @Override
    public void stopServices() {
        logger.info("Stopping services for " + containerName);
        dockerOperations.trySuspendNode(containerName);
        dockerOperations.stopServicesOnNode(containerName);
    }

    // Returns the reason the container must go away (state, image, liveness or resource
    // mismatch), or empty if it may keep running.
    private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
        final Node.State nodeState = nodeSpec.nodeState;
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        ContainerResources wantedContainerResources = ContainerResources.from(
                nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb);
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        return Optional.empty();
    }

    // Removes the container when required (suspending and stopping services first if it is
    // running) and returns empty; otherwise returns the container unchanged.
    private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
        if (removeReason.isPresent()) {
            logger.info("Will remove container " + existingContainer + ": " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (nodeSpec.nodeState == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    stopServices();
                } catch (Exception e) {
                    // Deliberately best-effort: removal proceeds even if stopping fails.
                    logger.info("Failed stopping services, ignoring", e);
                }
            }
            if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
            dockerOperations.removeContainer(existingContainer, nodeSpec);
            containerState = ABSENT;
            logger.info("Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    // Kicks off an async pull of the wanted image when it differs from the current one.
    private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
        if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;

        // NOTE(review): wantedDockerImage.get() is unguarded — if current is present but
        // wanted is absent (the Optionals then differ), this throws NoSuchElementException.
        if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) {
            imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        } else if (imageBeingDownloaded != null) { // Pull finished since last tick
            imageBeingDownloaded = null;
        }
    }

    // Wakes the loop thread so the next tick runs immediately.
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                addDebugMessage("Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One loop iteration: waits until the next converge is due (or until signalled),
     * synchronizes the frozen flag, then runs converge() unless frozen. All exception
     * handling for converge() lives here.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        logger.error("Interrupted, but ignoring this: " + hostname);
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        if (isFrozenCopy) {
            addDebugMessage("tick: isFrozen");
        } else {
            try {
                converge();
            } catch (OrchestratorException e) {
                // Expected during suspend/resume negotiation; retried next tick.
                logger.info(e.getMessage());
                addDebugMessage(e.getMessage());
            } catch (DockerException e) {
                numberOfUnhandledException++;
                // Docker state is now uncertain; re-query the daemon next tick.
                containerState = UNKNOWN;
                logger.error("Caught a DockerException, resetting containerState to " + containerState, e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                logger.error("Unhandled exception, ignoring.", e);
                addDebugMessage(e.getMessage());
            } catch (Throwable t) {
                logger.error("Unhandled throwable, taking down system.", t);
                System.exit(234);
            }
        }
    }

    /**
     * Samples docker stats for the container and pushes cpu/memory/disk/network metrics into
     * it. No-op before the first converge or when the container is known to be absent.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final ContainerNodeSpec nodeSpec = lastNodeSpec;
        if (nodeSpec == null || containerState == ABSENT) return;

        Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", hostname)
                .add("role", "tenants")
                .add("state", nodeSpec.nodeState.toString())
                .add("parentHostname", environment.getParentHostHostname());
        Dimensions dimensions = dimensionsBuilder.build();

        Docker.ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // Raw docker stats come back as loosely-typed maps; hence the casts below.
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // CPU usage is scaled to the share of the host's cores allocated to this node.
        final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        // Page-cache bytes are reclaimable and therefore not counted as used.
        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    // Serializes the metrics as secret-agent reports and injects them via vespa-rpc-invoke.
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091",
                    "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            logger.warning("Unable to push metrics to container: " + containerName, e);
        }
    }

    // Queries the daemon for the container, short-circuiting on known-ABSENT and recording
    // ABSENT when the daemon reports no container.
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(containerName);
        if (!container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public String getHostname() {
        return hostname;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count since the last call and resets it to zero. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks deltas of cumulative CPU counters between successive samples. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // First sample has no baseline, so the system delta is forced to 0
            // (making the ratios below NaN).
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Same as getCpuUsageRatio(), but for kernel-mode CPU time only. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    // Asks the Orchestrator for permission to suspend this node; throws if denied.
    private void orchestratorSuspendNode() {
        logger.info("Ask Orchestrator for permission to suspend node " + hostname);
        orchestrator.suspend(hostname);
    }
}
ok, I guess it's fine.
void converge() { final Optional<ContainerNodeSpec> nodeSpecOptional = nodeRepository.getContainerNodeSpec(hostname); if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return; final ContainerNodeSpec nodeSpec = nodeSpecOptional.orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname))); expectNodeNotInNodeRepo = false; Optional<Container> container = getContainer(); if (!nodeSpec.equals(lastNodeSpec)) { addDebugMessage("Loading new node spec: " + nodeSpec.toString()); lastNodeSpec = nodeSpec; if (container.isPresent()) { storageMaintainer.writeMetricsConfig(containerName, nodeSpec); } } switch (nodeSpec.nodeState) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case active: storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); storageMaintainer.getDiskUsageFor(containerName) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName)); scheduleDownLoadIfNeeded(nodeSpec); if (isDownloadingImage()) { addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(nodeSpec, container); if (! 
container.isPresent()) { storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); startContainer(nodeSpec); } runLocalResumeScriptIfNeeded(); updateNodeRepoWithCurrentAttributes(nodeSpec); logger.info("Call resume against Orchestrator"); orchestrator.resume(hostname); break; case inactive: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case provisioned: nodeRepository.markAsDirty(hostname); break; case dirty: removeContainerIfNeededUpdateContainerState(nodeSpec, container); logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready"); storageMaintainer.cleanupNodeStorage(containerName, nodeSpec); updateNodeRepoWithCurrentAttributes(nodeSpec); nodeRepository.markNodeAvailableForNewAllocation(hostname); expectNodeNotInNodeRepo = true; break; default: throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name()); } }
if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return;
void converge() { final Optional<ContainerNodeSpec> nodeSpecOptional = nodeRepository.getContainerNodeSpec(hostname); if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return; final ContainerNodeSpec nodeSpec = nodeSpecOptional.orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname))); expectNodeNotInNodeRepo = false; Optional<Container> container = getContainer(); if (!nodeSpec.equals(lastNodeSpec)) { addDebugMessage("Loading new node spec: " + nodeSpec.toString()); lastNodeSpec = nodeSpec; if (container.isPresent()) { storageMaintainer.writeMetricsConfig(containerName, nodeSpec); } } switch (nodeSpec.nodeState) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case active: storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); storageMaintainer.getDiskUsageFor(containerName) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName)); scheduleDownLoadIfNeeded(nodeSpec); if (isDownloadingImage()) { addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(nodeSpec, container); if (! 
container.isPresent()) { storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false); startContainer(nodeSpec); } runLocalResumeScriptIfNeeded(); updateNodeRepoWithCurrentAttributes(nodeSpec); logger.info("Call resume against Orchestrator"); orchestrator.resume(hostname); break; case inactive: removeContainerIfNeededUpdateContainerState(nodeSpec, container); updateNodeRepoWithCurrentAttributes(nodeSpec); break; case provisioned: nodeRepository.markAsDirty(hostname); break; case dirty: removeContainerIfNeededUpdateContainerState(nodeSpec, container); logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready"); storageMaintainer.cleanupNodeStorage(containerName, nodeSpec); updateNodeRepoWithCurrentAttributes(nodeSpec); nodeRepository.markNodeAvailableForNewAllocation(hostname); expectNodeNotInNodeRepo = true; break; default: throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name()); } }
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private boolean expectNodeNotInNodeRepo = false; private final Object monitor = new Object(); private final PrefixLogger logger; private DockerImage imageBeingDownloaded = null; private final ContainerName containerName; private final String hostname; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Runnable aclMaintainer; private final Environment environment; private final Clock clock; private final Duration timeBetweenEachConverge; private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); private final LinkedList<String> debugMessages = new LinkedList<>(); private int numberOfUnhandledException = 0; private Instant lastConverge; private final Thread loopThread; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Future<?> currentFilebeatRestarter; private boolean resumeScriptRun = false; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeAttributes lastAttributesSet = null; private ContainerNodeSpec lastNodeSpec = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final String hostName, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final Runnable aclMaintainer, final Environment environment, final Clock clock, final Duration timeBetweenEachConverge) { this.containerName = ContainerName.fromHostname(hostName); this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName); this.hostname = hostName; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.aclMaintainer = aclMaintainer; this.environment = environment; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.loopThread = new Thread(() -> { while (!terminated.get()) tick(); }); this.loopThread.setName("tick-" + hostname); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; addDebugMessage(wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } private void addDebugMessage(String message) { synchronized (debugMessages) { while (debugMessages.size() > 1000) { debugMessages.pop(); } logger.debug(message); debugMessages.add("[" + sdf.format(new Date()) + "] " + message); } } @Override public Map<String, Object> debugInfo() { Map<String, Object> debug = new LinkedHashMap<>(); debug.put("Hostname", hostname); debug.put("isFrozen", isFrozen); debug.put("wantFrozen", wantFrozen); debug.put("terminated", terminated); debug.put("workToDoNow", workToDoNow); synchronized (debugMessages) { debug.put("History", new LinkedList<>(debugMessages)); } debug.put("Node repo state", lastNodeSpec.nodeState.name()); return debug; } @Override public void start() { String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"; logger.info(message); addDebugMessage(message); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( containerName, "service", service, "restart"); if (!processResult.isSuccess()) { logger.error("Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { logger.error("Failed to restart service " + service, e); } }; } @Override public void stop() { addDebugMessage("Stopping"); filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); logger.info("Stopped"); } private void runLocalResumeScriptIfNeeded() { if (! 
resumeScriptRun) { addDebugMessage("Starting optional node program resume command"); dockerOperations.resumeNode(containerName); resumeScriptRun = true; } } private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) { final NodeAttributes nodeAttributes = new NodeAttributes() .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null)) .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L)) .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage(""))) .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse("")); publishStateToNodeRepoIfChanged(nodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) { if (!currentAttributes.equals(lastAttributesSet)) { logger.info("Publishing new set of attributes to node repo: " + lastAttributesSet + " -> " + currentAttributes); addDebugMessage("Publishing new set of attributes to node repo: {" + lastAttributesSet + "} -> {" + currentAttributes + "}"); nodeRepository.updateNodeAttributes(hostname, currentAttributes); lastAttributesSet = currentAttributes; } } private void startContainer(ContainerNodeSpec nodeSpec) { aclMaintainer.run(); dockerOperations.startContainer(containerName, nodeSpec); lastCpuMetric = new CpuUsageReporter(); currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS); storageMaintainer.writeMetricsConfig(containerName, nodeSpec); storageMaintainer.writeFilebeatConfig(containerName, nodeSpec); resumeScriptRun = false; containerState = UNKNOWN; logger.info("Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(nodeSpec, container)) 
.map(container -> { shouldRestartServices(nodeSpec).ifPresent(restartReason -> { logger.info("Will restart services for container " + container + ": " + restartReason); restartServices(nodeSpec, container); }); return container; }); } private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) { if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty(); if (!nodeSpec.currentRestartGeneration.isPresent() || nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get()); } return Optional.empty(); } private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) { if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) { ContainerName containerName = existingContainer.name; logger.info("Restarting services for " + containerName); orchestratorSuspendNode(); dockerOperations.restartVespaOnNode(containerName); } } @Override public void stopServices() { logger.info("Stopping services for " + containerName); dockerOperations.trySuspendNode(containerName); dockerOperations.stopServicesOnNode(containerName); } private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) { final Node.State nodeState = nodeSpec.nodeState; if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer + " -> " + nodeSpec.wantedDockerImage.get()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources 
wantedContainerResources = ContainerResources.from( nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + wantedContainerResources + ", actual: " + existingContainer.resources); } return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer); if (removeReason.isPresent()) { logger.info("Will remove container " + existingContainer + ": " + removeReason.get()); if (existingContainer.state.isRunning()) { if (nodeSpec.nodeState == Node.State.active) { orchestratorSuspendNode(); } try { stopServices(); } catch (Exception e) { logger.info("Failed stopping services, ignoring", e); } } if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true); dockerOperations.removeContainer(existingContainer, nodeSpec); containerState = ABSENT; logger.info("Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) { if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return; if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) { imageBeingDownloaded = nodeSpec.wantedDockerImage.get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; addDebugMessage("Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) { long remainder = timeBetweenEachConverge .minus(Duration.between(lastConverge, clock.instant())) .toMillis(); if (remainder > 0) { try { 
monitor.wait(remainder); } catch (InterruptedException e) { logger.error("Interrupted, but ignoring this: " + hostname); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } if (isFrozenCopy) { addDebugMessage("tick: isFrozen"); } else { try { converge(); } catch (OrchestratorException e) { logger.info(e.getMessage()); addDebugMessage(e.getMessage()); } catch (DockerException e) { numberOfUnhandledException++; containerState = UNKNOWN; logger.error("Caught a DockerException, resetting containerState to " + containerState, e); } catch (Exception e) { numberOfUnhandledException++; logger.error("Unhandled exception, ignoring.", e); addDebugMessage(e.getMessage()); } catch (Throwable t) { logger.error("Unhandled throwable, taking down system.", t); System.exit(234); } } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { final ContainerNodeSpec nodeSpec = lastNodeSpec; if (nodeSpec == null || containerState == ABSENT) return; Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", hostname) .add("role", "tenants") .add("state", nodeSpec.nodeState.toString()) .add("parentHostname", environment.getParentHostHostname()); Dimensions dimensions = dimensionsBuilder.build(); Docker.ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) 
stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) .withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, 
interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); } private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { logger.warning("Unable to push metrics to container: " + containerName, e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = dockerOperations.getContainer(containerName); if (! 
container.isPresent()) containerState = ABSENT; return container; } @Override public String getHostname() { return hostname; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { logger.info("Ask Orchestrator for permission to suspend node " + hostname); orchestrator.suspend(hostname); } }
/**
 * Agent that owns the lifecycle of a single Docker container: it runs a dedicated "tick" thread
 * that repeatedly converges the container towards the wanted state from the node repository,
 * publishes node attributes back, restarts services when the restart generation is bumped,
 * and pushes container resource metrics into the container.
 *
 * Thread-safety: the frozen/work flags are guarded by {@code monitor}; debug history is guarded
 * by {@code debugMessages}. Most other fields are only touched from the tick thread.
 *
 * NOTE(review): this excerpt does not show {@code converge()}, which {@code tick()} calls —
 * it is defined elsewhere in this class.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;

    // Set once by stop(); the tick loop exits when it observes this.
    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // isFrozen is the state the tick loop has acknowledged; wantFrozen is what callers asked for.
    // Both guarded by 'monitor'.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;
    // NOTE(review): not referenced anywhere in this excerpt — presumably used by converge();
    // confirm before removing.
    private boolean expectNodeNotInNodeRepo = false;
    private final Object monitor = new Object();

    private final PrefixLogger logger;
    private DockerImage imageBeingDownloaded = null;

    private final ContainerName containerName;
    private final String hostname;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Runnable aclMaintainer;
    private final Environment environment;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;

    // NOTE(review): SimpleDateFormat is not thread-safe; all uses here are inside
    // synchronized (debugMessages), which makes this safe — keep it that way.
    private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    // Rolling debug history (capped at ~1000 entries), guarded by itself.
    private final LinkedList<String> debugMessages = new LinkedList<>();

    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    // Single daemon thread used to restart filebeat periodically inside the container.
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Future<?> currentFilebeatRestarter;
    private boolean resumeScriptRun = false;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        UNKNOWN
    }
    private ContainerState containerState = UNKNOWN;

    // Last attributes published to the node repo; used to suppress redundant updates.
    private NodeAttributes lastAttributesSet = null;
    private ContainerNodeSpec lastNodeSpec = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final String hostName,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Runnable aclMaintainer,
            final Environment environment,
            final Clock clock,
            final Duration timeBetweenEachConverge) {
        this.containerName = ContainerName.fromHostname(hostName);
        this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
        this.hostname = hostName;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.environment = environment;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        // The loop thread is created here but only started by start().
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) tick();
        });
        this.loopThread.setName("tick-" + hostname);
    }

    /**
     * Requests the agent to (un)freeze and returns whether the agent has already reached the
     * requested frozen state (the tick loop applies the change asynchronously).
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    /** Logs at debug level and appends a timestamped entry to the bounded debug history. */
    private void addDebugMessage(String message) {
        synchronized (debugMessages) {
            while (debugMessages.size() > 1000) {
                debugMessages.pop();
            }
            logger.debug(message);
            debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
        }
    }

    /**
     * Snapshot of internal state for debugging endpoints.
     * NOTE(review): dereferences lastNodeSpec without a null check — throws NPE if called before
     * the first node spec has been fetched; confirm callers guarantee that ordering.
     */
    @Override
    public Map<String, Object> debugInfo() {
        Map<String, Object> debug = new LinkedHashMap<>();
        debug.put("Hostname", hostname);
        debug.put("isFrozen", isFrozen);
        debug.put("wantFrozen", wantFrozen);
        debug.put("terminated", terminated);
        debug.put("workToDoNow", workToDoNow);
        synchronized (debugMessages) {
            debug.put("History", new LinkedList<>(debugMessages));
        }
        debug.put("Node repo state", lastNodeSpec.nodeState.name());
        return debug;
    }

    /** Starts the tick loop thread and installs the in-container service restart callback. */
    @Override
    public void start() {
        String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms";
        logger.info(message);
        addDebugMessage(message);
        loopThread.start();
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        containerName, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    logger.error("Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                logger.error("Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Stops the agent: shuts down the filebeat restarter, flags termination (exactly once) and
     * blocks until both the tick thread and the scheduler have finished.
     */
    @Override
    public void stop() {
        addDebugMessage("Stopping");
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        logger.info("Stopped");
    }

    /** Runs the optional resume command in the container, at most once per container start. */
    private void runLocalResumeScriptIfNeeded() {
        if (!resumeScriptRun) {
            addDebugMessage("Starting optional node program resume command");
            dockerOperations.resumeNode(containerName);
            resumeScriptRun = true;
        }
    }

    /**
     * Builds the node attributes reflecting current local state and publishes them if changed.
     * Docker image / Vespa version are blanked out while no container exists.
     */
    private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
        final NodeAttributes nodeAttributes = new NodeAttributes()
                .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
                .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
                .withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
                .withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
        publishStateToNodeRepoIfChanged(nodeAttributes);
    }

    /** Pushes attributes to the node repo only when they differ from the last published set. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
        if (!currentAttributes.equals(lastAttributesSet)) {
            logger.info("Publishing new set of attributes to node repo: "
                    + lastAttributesSet + " -> " + currentAttributes);
            addDebugMessage("Publishing new set of attributes to node repo: {"
                    + lastAttributesSet + "} -> {" + currentAttributes + "}");
            nodeRepository.updateNodeAttributes(hostname, currentAttributes);
            lastAttributesSet = currentAttributes;
        }
    }

    /**
     * Starts the container and resets per-container state: CPU metric baseline, the daily
     * filebeat restart schedule, metrics/filebeat config, and the resume-script flag.
     */
    private void startContainer(ContainerNodeSpec nodeSpec) {
        aclMaintainer.run();
        dockerOperations.startContainer(containerName, nodeSpec);
        lastCpuMetric = new CpuUsageReporter();
        currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(
                () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
        storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        storageMaintainer.writeFilebeatConfig(containerName, nodeSpec);
        resumeScriptRun = false;
        containerState = UNKNOWN;
        logger.info("Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if required; if it is kept, restarts its services when the restart
     * generation demands it. Returns the (possibly removed, hence empty) container.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
                .map(container -> {
                    shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
                        logger.info("Will restart services for container " + container + ": " + restartReason);
                        restartServices(nodeSpec, container);
                    });
                    return container;
                });
    }

    /** Returns a human-readable reason to restart services, or empty if none. */
    private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
        if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
        // Restart when the wanted generation is ahead of (or we have no) current generation.
        // NOTE(review): the message below calls currentRestartGeneration.get() even on the
        // !isPresent() branch — confirm that cannot NPE/throw in practice.
        if (!nodeSpec.currentRestartGeneration.isPresent() ||
                nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + nodeSpec.currentRestartGeneration.get() + " -> "
                    + nodeSpec.wantedRestartGeneration.get());
        }
        return Optional.empty();
    }

    /** Suspends via the orchestrator and restarts Vespa, but only for a running, active node. */
    private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
        if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
            ContainerName containerName = existingContainer.name;
            logger.info("Restarting services for " + containerName);
            // Since we are restarting the services we need to suspend the node.
            orchestratorSuspendNode();
            dockerOperations.restartVespaOnNode(containerName);
        }
    }

    /** Best-effort suspend followed by stopping all services in the container. */
    @Override
    public void stopServices() {
        logger.info("Stopping services for " + containerName);
        dockerOperations.trySuspendNode(containerName);
        dockerOperations.stopServicesOnNode(containerName);
    }

    /**
     * Returns a reason to remove the existing container, or empty to keep it:
     * node released, image change wanted, container dead, or resource allocation mismatch.
     */
    private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
        final Node.State nodeState = nodeSpec.nodeState;
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }
        ContainerResources wantedContainerResources = ContainerResources.from(
                nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb);
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        return Optional.empty();
    }

    /**
     * Removes the container when shouldRemoveContainer() demands it (suspending via the
     * orchestrator first if active, stopping services best-effort, cancelling the filebeat
     * restart schedule) and marks the container ABSENT. Returns empty if removed.
     */
    private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
        if (removeReason.isPresent()) {
            logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
            if (existingContainer.state.isRunning()) {
                if (nodeSpec.nodeState == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    // Failure to stop services should not block removal of the container.
                    stopServices();
                } catch (Exception e) {
                    logger.info("Failed stopping services, ignoring", e);
                }
            }
            if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
            dockerOperations.removeContainer(existingContainer, nodeSpec);
            containerState = ABSENT;
            logger.info("Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /**
     * Kicks off an async image pull when the wanted image differs from the current one, tracking
     * the in-flight download in imageBeingDownloaded (null when nothing is downloading).
     */
    private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
        if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
        if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) {
            imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
        } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the tick loop early so pending work is processed without waiting a full interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                addDebugMessage("Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the agent loop: wait out the remainder of the converge interval (or until
     * signalled), apply a pending freeze/unfreeze, then converge unless frozen. Exceptions from
     * converge() are contained here so the loop never dies; only a Throwable takes the JVM down.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        logger.error("Interrupted, but ignoring this: " + hostname);
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;
            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }
        if (isFrozenCopy) {
            addDebugMessage("tick: isFrozen");
        } else {
            try {
                converge();
            } catch (OrchestratorException e) {
                logger.info(e.getMessage());
                addDebugMessage(e.getMessage());
            } catch (DockerException e) {
                numberOfUnhandledException++;
                containerState = UNKNOWN;
                logger.error("Caught a DockerException, resetting containerState to " + containerState, e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                logger.error("Unhandled exception, ignoring.", e);
                addDebugMessage(e.getMessage());
            } catch (Throwable t) {
                logger.error("Unhandled throwable, taking down system.", t);
                System.exit(234);
            }
        }
    }

    /**
     * Reads docker stats for the container, computes CPU/memory/disk/network metrics relative to
     * the node's allocation, and pushes them into the container. No-op if the container is absent
     * or no node spec has been seen yet.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final ContainerNodeSpec nodeSpec = lastNodeSpec;
        if (nodeSpec == null || containerState == ABSENT) return;
        Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
        if (!containerStats.isPresent()) return;
        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", hostname)
                .add("role", "tenants")
                .add("state", nodeSpec.nodeState.toString())
                .add("parentHostname", environment.getParentHostHostname());
        Dimensions dimensions = dimensionsBuilder.build();
        Docker.ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // Raw counters from the docker stats maps (cgroup cumulative values).
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName);
        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
        // Ratio of total host CPU the node is entitled to.
        // NOTE(review): if minCpuCores were an integer type this would be integer division —
        // confirm it is a floating-point field.
        final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
        // Page cache is subtracted: it is reclaimable and not "real" usage.
        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);
        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());
        // One metric set per network interface, with an extra "interface" dimension.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });
        pushMetricsToContainer(metrics);
    }

    /**
     * Serializes the metrics to secret-agent reports and delivers them into the container via a
     * vespa-rpc-invoke call (2s rpc timeout, 5s exec timeout). Failures are logged, not thrown.
     */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            logger.warning("Unable to push metrics to container: " + containerName, e);
        }
    }

    /**
     * Fetches the container from docker unless we already know it is absent; updates
     * containerState to ABSENT when docker no longer has it.
     */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(containerName);
        if (!container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public String getHostname() {
        return hostname;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count since the last call and resets it to zero. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /**
     * Tracks cumulative CPU counters between updates and exposes the usage ratios for the
     * interval between the last two updateCpuDeltas() calls.
     */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // First sample: no baseline yet, so report a zero system delta (ratios become NaN).
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Same as {@link #getCpuUsageRatio()}, but for kernel-mode CPU time only. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the orchestrator for permission to suspend this node (throws if denied). */
    private void orchestratorSuspendNode() {
        logger.info("Ask Orchestrator for permission to suspend node " + hostname);
        orchestrator.suspend(hostname);
    }
}
Does getBooleanProperty return false when the property is absent, as opposed to only when it is explicitly "false"? Please confirm, and document the default here.
/**
 * Builds a {@link PropertySetter} from the feed properties carried on the given request,
 * falling back to the configured defaults for every property that is not set.
 *
 * @param request the HTTP request carrying per-request feed overrides; may be null, in which
 *                case all configured defaults are used
 * @return a property setter capturing route, timeout, priority, load type and error handling
 * @throws NumberFormatException if the "timeout" or "tracelevel" property is not a valid number
 */
public PropertySetter buildPropertySetter(HttpRequest request) {
    String routeOverride = null;
    double timeoutSeconds = -1;
    String priorityName = null;
    String abortOnDocErrorValue = null;
    String abortOnFeedErrorValue = null;
    String loadTypeName = null;
    String traceLevelValue = null;
    String createIfNonExistentValue = null;
    boolean useConstantTimeout = false;

    if (request != null) {
        routeOverride = request.getProperty("route");
        String timeoutValue = request.getProperty("timeout");
        if (timeoutValue != null) {
            timeoutSeconds = Double.parseDouble(timeoutValue);
        }
        useConstantTimeout = request.getBooleanProperty("useconstanttimeout");
        priorityName = request.getProperty("priority");
        traceLevelValue = request.getProperty("tracelevel");
        abortOnDocErrorValue = request.getProperty("abortondocumenterror");
        abortOnFeedErrorValue = request.getProperty("abortonfeederror");
        loadTypeName = request.getProperty("loadtype");
        createIfNonExistentValue = request.getProperty("createifnonexistent");
    }

    Route route = (routeOverride == null) ? null : Route.parse(routeOverride);
    final long timeoutMillis;
    final boolean retry;
    final boolean abortOnDocumentError;
    final boolean abortOnFeedError;
    final boolean createIfNonExistent;
    // Read the config-derived defaults under the same lock configure() writes them under.
    synchronized (this) {
        if (route == null) {
            route = defaultRoute;
        }
        timeoutMillis = (timeoutSeconds < 0) ? defaultTimeoutMillis : (long) (timeoutSeconds * 1000);
        retry = retryEnabled;
        // Any value other than the literal "false" enables abort-on-error.
        abortOnDocumentError = (abortOnDocErrorValue == null)
                ? defaultAbortOnDocumentError : !"false".equals(abortOnDocErrorValue);
        abortOnFeedError = (abortOnFeedErrorValue == null)
                ? defaultAbortOnSendError : !"false".equals(abortOnFeedErrorValue);
        // Only the literal "true" enables create-if-nonexistent.
        createIfNonExistent = (createIfNonExistentValue == null)
                ? defaultCreateIfNonExistent : "true".equals(createIfNonExistentValue);
    }

    DocumentProtocol.Priority priority =
            (priorityName == null) ? null : DocumentProtocol.getPriorityByName(priorityName);

    LoadType loadType = null;
    if (loadTypes != null && loadTypeName != null) {
        loadType = loadTypes.getNameMap().get(loadTypeName);
    }
    if (loadType == null) {
        loadType = LoadType.DEFAULT;
    }

    int traceLevel = (traceLevelValue == null) ? 0 : Integer.parseInt(traceLevelValue);
    return new PropertySetter(useConstantTimeout, route, timeoutMillis, priority, loadType,
            retry, abortOnDocumentError, abortOnFeedError, createIfNonExistent, traceLevel);
}
useConstantTimeout = request.getBooleanProperty("useconstanttimeout");
/**
 * Builds a {@link PropertySetter} from the feed properties carried on the given request,
 * falling back to the configured defaults for every property that is not set. The total
 * timeout defaults to the (per-message) timeout when not given explicitly.
 *
 * @param request the HTTP request carrying per-request feed overrides; may be null, in which
 *                case all configured defaults are used
 * @return a property setter capturing route, timeouts, priority, load type and error handling
 * @throws NumberFormatException if "timeout", "totaltimeout" or "tracelevel" is not a valid number
 */
public PropertySetter buildPropertySetter(HttpRequest request) {
    String routeOverride = null;
    double timeoutSeconds = -1;
    Double totalTimeoutSeconds = null;
    String priorityName = null;
    String abortOnDocErrorValue = null;
    String abortOnFeedErrorValue = null;
    String loadTypeName = null;
    String traceLevelValue = null;
    String createIfNonExistentValue = null;

    if (request != null) {
        routeOverride = request.getProperty("route");
        String value = request.getProperty("timeout");
        if (value != null) {
            timeoutSeconds = Double.parseDouble(value);
        }
        value = request.getProperty("totaltimeout");
        if (value != null) {
            totalTimeoutSeconds = Double.parseDouble(value);
        }
        priorityName = request.getProperty("priority");
        traceLevelValue = request.getProperty("tracelevel");
        abortOnDocErrorValue = request.getProperty("abortondocumenterror");
        abortOnFeedErrorValue = request.getProperty("abortonfeederror");
        loadTypeName = request.getProperty("loadtype");
        createIfNonExistentValue = request.getProperty("createifnonexistent");
    }

    Route route = (routeOverride == null) ? null : Route.parse(routeOverride);
    final long timeoutMillis;
    final boolean retry;
    final boolean abortOnDocumentError;
    final boolean abortOnFeedError;
    final boolean createIfNonExistent;
    // Read the config-derived defaults under the same lock configure() writes them under.
    synchronized (this) {
        if (route == null) {
            route = defaultRoute;
        }
        timeoutMillis = (timeoutSeconds < 0) ? defaultTimeoutMillis : (long) (timeoutSeconds * 1000);
        retry = retryEnabled;
        // Any value other than the literal "false" enables abort-on-error.
        abortOnDocumentError = (abortOnDocErrorValue == null)
                ? defaultAbortOnDocumentError : !"false".equals(abortOnDocErrorValue);
        abortOnFeedError = (abortOnFeedErrorValue == null)
                ? defaultAbortOnSendError : !"false".equals(abortOnFeedErrorValue);
        // Only the literal "true" enables create-if-nonexistent.
        createIfNonExistent = (createIfNonExistentValue == null)
                ? defaultCreateIfNonExistent : "true".equals(createIfNonExistentValue);
    }
    long totalTimeoutMillis = (totalTimeoutSeconds == null)
            ? timeoutMillis : (long) (totalTimeoutSeconds * 1000);

    DocumentProtocol.Priority priority =
            (priorityName == null) ? null : DocumentProtocol.getPriorityByName(priorityName);

    LoadType loadType = null;
    if (loadTypes != null && loadTypeName != null) {
        loadType = loadTypes.getNameMap().get(loadTypeName);
    }
    if (loadType == null) {
        loadType = LoadType.DEFAULT;
    }

    int traceLevel = (traceLevelValue == null) ? 0 : Integer.parseInt(traceLevelValue);
    return new PropertySetter(route, timeoutMillis, totalTimeoutMillis, priority, loadType,
            retry, abortOnDocumentError, abortOnFeedError, createIfNonExistent, traceLevel);
}
/**
 * Holds feed-related configuration (route, timeouts, error handling, load types) subscribed from
 * {@link FeederConfig} and produces per-request {@link PropertySetter}s that apply those
 * properties to outgoing messages and visitor parameters.
 *
 * Thread-safety: the config-derived default fields are written by the synchronized configure()
 * methods and read by other synchronized members.
 */
class MessagePropertyProcessor implements ConfigSubscriber.SingleSubscriber<FeederConfig> {
    private static final Logger log = Logger.getLogger(MessagePropertyProcessor.class.getName());

    // Defaults below are replaced on every configure() callback.
    private FeederOptions feederOptions = null;
    private Route defaultRoute = null;
    private long defaultTimeoutMillis = 0;
    private boolean retryEnabled = true;
    private String defaultDocprocChain = null;
    private boolean defaultAbortOnDocumentError = true;
    private boolean defaultAbortOnSendError = true;
    private boolean defaultCreateIfNonExistent = false;
    private LoadTypeSet loadTypes = null;
    private boolean configChanged = false;

    /** Subscribes to feeder config at the given config id; load types are built from config. */
    public MessagePropertyProcessor(String configId, String loadTypeConfig) {
        new ConfigSubscriber().subscribe(this, FeederConfig.class, configId);
        loadTypes = new LoadTypeSet(loadTypeConfig);
    }

    /** Configures directly from already-resolved config objects (no subscription). */
    public MessagePropertyProcessor(FeederConfig config, LoadTypeConfig loadTypeCfg) {
        loadTypes = new LoadTypeSet();
        configure(config, loadTypeCfg);
    }

    /** Overrides the default route, bypassing config. */
    public void setRoute(String routeOverride) {
        defaultRoute = Route.parse(routeOverride);
    }

    /** Returns the request's "docprocchain" property, or the configured default if unset. */
    private synchronized String getDocprocChainParameter(HttpRequest request) {
        String docprocChainParam = request.getProperty("docprocchain");
        return (docprocChainParam == null ? defaultDocprocChain : docprocChainParam);
    }

    /**
     * Resolves the docproc chain to run for this request, or null when no chain is configured.
     * NOTE(review): if a chain name is set but getDocprocServiceRegistry() returns null
     * (no handler registered), this NPEs on services — confirm that combination cannot occur.
     */
    public synchronized DocprocService getDocprocChain(HttpRequest request) {
        ComponentRegistry<DocprocService> services = getDocprocServiceRegistry(request);
        String docprocChain = getDocprocChainParameter(request);
        if (docprocChain == null) {
            return null;
        }
        return services.getComponent(docprocChain);
    }

    /**
     * Looks up the docproc service registry via the container's request handler registry.
     * Returns null when no chain is requested or no document processing handler is registered.
     *
     * @throws IllegalStateException if the container or one of its registries is unavailable
     */
    public synchronized ComponentRegistry<DocprocService> getDocprocServiceRegistry(HttpRequest request) {
        String docprocChain = getDocprocChainParameter(request);
        if (docprocChain == null) {
            return null;
        }
        Container container = Container.get();
        if (container == null) {
            throw new IllegalStateException("Could not get Container instance.");
        }
        ComponentRegistry<RequestHandler> requestHandlerRegistry = container.getRequestHandlerRegistry();
        if (requestHandlerRegistry == null) {
            throw new IllegalStateException("Could not get requesthandlerregistry.");
        }
        DocumentProcessingHandler handler = (DocumentProcessingHandler) requestHandlerRegistry
                .getComponent(DocumentProcessingHandler.class.getName());
        if (handler == null) {
            return null;
        }
        ComponentRegistry<DocprocService> services = handler.getDocprocServiceRegistry();
        if (services == null) {
            throw new IllegalStateException("Could not get DocprocServiceRegistry.");
        }
        return services;
    }

    public long getDefaultTimeoutMillis() {
        return defaultTimeoutMillis;
    }

    /** Returns whether configure() has been called again after the initial configuration. */
    public synchronized boolean configChanged() {
        return configChanged;
    }

    public synchronized void setConfigChanged(boolean configChanged) {
        this.configChanged = configChanged;
    }

    public synchronized FeederOptions getFeederOptions() {
        return feederOptions;
    }

    /** Applies both load type config and feeder config. */
    public synchronized void configure(FeederConfig config, LoadTypeConfig loadTypeConfig) {
        loadTypes.configure(loadTypeConfig);
        configure(config);
    }

    public LoadTypeSet getLoadTypes() {
        return loadTypes;
    }

    /**
     * Config subscription callback: rebuilds all default feed properties from the new config.
     * Marks the config as changed on every call after the first.
     */
    public synchronized void configure(FeederConfig config) {
        if (feederOptions != null) {
            setConfigChanged(true);
        }
        feederOptions = new FeederOptions(config);
        if (feederOptions.getRoute() != null) {
            defaultRoute = Route.parse(feederOptions.getRoute());
        } else {
            defaultRoute = null;
        }
        defaultTimeoutMillis = (long) (feederOptions.getTimeout() * 1000);
        retryEnabled = feederOptions.getRetryEnabled();
        defaultAbortOnDocumentError = feederOptions.abortOnDocumentError();
        defaultAbortOnSendError = feederOptions.abortOnSendError();
        if (!"".equals(feederOptions.getDocprocChain())) {
            defaultDocprocChain = feederOptions.getDocprocChain();
        } else {
            defaultDocprocChain = null;
        }
        if (log.isLoggable(LogLevel.DEBUG)) {
            log.log(LogLevel.DEBUG, "Received new config (" +
                    "route: " + (defaultRoute != null ? defaultRoute : "<none>") +
                    ", timeout: " + defaultTimeoutMillis + " ms" +
                    ", retry enabled: " + retryEnabled +
                    ", docproc chain: " + (defaultDocprocChain != null ? defaultDocprocChain : "<none>") +
                    ", abort on doc error: " + defaultAbortOnDocumentError +
                    ", abort on feed error: " + defaultAbortOnSendError + ")");
        }
    }

    /**
     * Immutable-per-request snapshot of feed properties; applies them to messages and visitor
     * parameters. Captures its creation time so the remaining (non-constant) timeout can be
     * computed relative to when the request started.
     */
    public class PropertySetter implements MessageProcessor {
        /** Route either set by configuration or by explicit request override. May be null */
        private Route route;
        /** Timeout (in milliseconds) */
        private long timeout;
        // Creation time (ms), baseline for the decaying timeout.
        private long startTime;
        // When true, every message gets the full timeout instead of the remaining time.
        boolean useConstantTimeout;
        /** Explicit priority set. May be null */
        private DocumentProtocol.Priority priority;
        private boolean retryEnabled;
        private boolean abortOnDocumentError;
        private boolean abortOnFeedError;
        private boolean createIfNonExistent;
        private LoadType loadType;
        private int traceLevel;

        public PropertySetter(boolean useConstantTimeout, Route route, long timeout,
                              DocumentProtocol.Priority priority, LoadType loadType,
                              boolean retryEnabled, boolean abortOnDocumentError,
                              boolean abortOnFeedError, boolean createIfNonExistent,
                              int traceLevel) {
            this.useConstantTimeout = useConstantTimeout;
            this.route = route;
            this.timeout = timeout;
            this.priority = priority;
            this.loadType = loadType;
            this.retryEnabled = retryEnabled;
            this.abortOnDocumentError = abortOnDocumentError;
            this.abortOnFeedError = abortOnFeedError;
            this.createIfNonExistent = createIfNonExistent;
            this.traceLevel = traceLevel;
            this.startTime = SystemTimer.INSTANCE.milliTime();
        }

        /** Full timeout when constant; otherwise the timeout minus the time elapsed since creation. */
        private long getTimeRemaining() {
            return useConstantTimeout ? timeout : timeout - (SystemTimer.INSTANCE.milliTime() - startTime);
        }

        public Route getRoute() {
            return route;
        }

        public void setRoute(Route route) {
            this.route = route;
        }

        public long getTimeout() {
            return timeout;
        }

        public void setTimeout(long timeout) {
            this.timeout = timeout;
        }

        public DocumentProtocol.Priority getPriority() {
            return priority;
        }

        public void setPriority(DocumentProtocol.Priority priority) {
            this.priority = priority;
        }

        public LoadType getLoadType() {
            return loadType;
        }

        public void setLoadType(LoadType loadType) {
            this.loadType = loadType;
        }

        public boolean getAbortOnDocumentError() {
            return abortOnDocumentError;
        }

        public boolean getAbortOnFeedError() {
            return abortOnFeedError;
        }

        public boolean getCreateIfNonExistent() {
            return createIfNonExistent;
        }

        /** Applies the captured properties to a message; explicit priority wins over load type. */
        @Override
        public void process(Message msg) {
            if (route != null) {
                msg.setRoute(route);
            }
            msg.setTimeRemaining(getTimeRemaining());
            msg.setRetryEnabled(retryEnabled);
            msg.getTrace().setLevel(Math.max(getFeederOptions().getTraceLevel(), traceLevel));
            if (loadType != null) {
                ((DocumentMessage) msg).setLoadType(loadType);
                ((DocumentMessage) msg).setPriority(loadType.getPriority());
            }
            if (priority != null) {
                ((DocumentMessage) msg).setPriority(priority);
            }
        }

        /** Applies the captured properties to visitor parameters (always the full timeout). */
        public void process(VisitorParameters params) {
            if (route != null) {
                params.setRoute(route);
            }
            params.setTimeoutMs(timeout);
            params.setTraceLevel(Math.max(getFeederOptions().getTraceLevel(), traceLevel));
            if (loadType != null) {
                params.setLoadType(loadType);
                params.setPriority(loadType.getPriority());
            }
            if (priority != null) {
                params.setPriority(priority);
            }
        }
    }
}
/**
 * Holds feeder-level message/visitor defaults (route, timeout, priority, load type, ...)
 * derived from {@code FeederConfig}, and applies them to outgoing messages via the inner
 * {@link PropertySetter}. Subscribes to config so defaults are refreshed at runtime.
 *
 * Thread-safety: configuration state is guarded by {@code synchronized} on this instance;
 * getters/setters of the mutable defaults are synchronized accordingly.
 */
class MessagePropertyProcessor implements ConfigSubscriber.SingleSubscriber<FeederConfig> {

    private static final Logger log = Logger.getLogger(MessagePropertyProcessor.class.getName());

    // Defaults below are replaced wholesale on each configure() callback.
    private FeederOptions feederOptions = null;
    private Route defaultRoute = null;
    private long defaultTimeoutMillis = 0;
    private boolean retryEnabled = true;
    private String defaultDocprocChain = null;
    private boolean defaultAbortOnDocumentError = true;
    private boolean defaultAbortOnSendError = true;
    // NOTE(review): this default is never updated from config in this excerpt — confirm it is set elsewhere.
    private boolean defaultCreateIfNonExistent = false;
    private LoadTypeSet loadTypes = null;
    // Flag observed/reset by callers to detect that a new config has arrived mid-operation.
    private boolean configChanged = false;

    /**
     * Config-subscribing constructor.
     * NOTE(review): {@code this} escapes to the subscriber before construction completes —
     * the first configure() callback may run against a partially initialized instance; confirm intended.
     */
    public MessagePropertyProcessor(String configId, String loadTypeConfig) {
        new ConfigSubscriber().subscribe(this, FeederConfig.class, configId);
        loadTypes = new LoadTypeSet(loadTypeConfig);
    }

    /** Constructor for direct (non-subscribing) configuration, e.g. tests. */
    public MessagePropertyProcessor(FeederConfig config, LoadTypeConfig loadTypeCfg) {
        loadTypes = new LoadTypeSet();
        configure(config, loadTypeCfg);
    }

    /** Overrides the configured default route. */
    public void setRoute(String routeOverride) {
        defaultRoute = Route.parse(routeOverride);
    }

    /** Request parameter "docprocchain" wins over the configured default chain; may return null. */
    private synchronized String getDocprocChainParameter(HttpRequest request) {
        String docprocChainParam = request.getProperty("docprocchain");
        return (docprocChainParam == null ? defaultDocprocChain : docprocChainParam);
    }

    /**
     * Resolves the docproc service for this request, or null when no chain is selected.
     * NOTE(review): getDocprocServiceRegistry() may return null (handler missing), which would
     * NPE on services.getComponent() below — confirm that case cannot occur when a chain is set.
     */
    public synchronized DocprocService getDocprocChain(HttpRequest request) {
        ComponentRegistry<DocprocService> services = getDocprocServiceRegistry(request);
        String docprocChain = getDocprocChainParameter(request);
        if (docprocChain == null) {
            return null;
        }
        return services.getComponent(docprocChain);
    }

    /**
     * Looks up the container-global docproc service registry, or null when no chain is
     * requested or no DocumentProcessingHandler is registered.
     *
     * @throws IllegalStateException if container infrastructure is unexpectedly absent
     */
    public synchronized ComponentRegistry<DocprocService> getDocprocServiceRegistry(HttpRequest request) {
        String docprocChain = getDocprocChainParameter(request);
        if (docprocChain == null) {
            return null;
        }
        Container container = Container.get();
        if (container == null) {
            throw new IllegalStateException("Could not get Container instance.");
        }
        ComponentRegistry<RequestHandler> requestHandlerRegistry = container.getRequestHandlerRegistry();
        if (requestHandlerRegistry == null) {
            throw new IllegalStateException("Could not get requesthandlerregistry.");
        }
        DocumentProcessingHandler handler = (DocumentProcessingHandler) requestHandlerRegistry
                .getComponent(DocumentProcessingHandler.class.getName());
        if (handler == null) {
            return null;
        }
        ComponentRegistry<DocprocService> services = handler.getDocprocServiceRegistry();
        if (services == null) {
            throw new IllegalStateException("Could not get DocprocServiceRegistry.");
        }
        return services;
    }

    public long getDefaultTimeoutMillis() { return defaultTimeoutMillis; }

    /** True once a second (or later) config generation has been applied. */
    public synchronized boolean configChanged() { return configChanged; }

    public synchronized void setConfigChanged(boolean configChanged) { this.configChanged = configChanged; }

    public synchronized FeederOptions getFeederOptions() { return feederOptions; }

    /** Applies both load-type and feeder config in one call. */
    public synchronized void configure(FeederConfig config, LoadTypeConfig loadTypeConfig) {
        loadTypes.configure(loadTypeConfig);
        configure(config);
    }

    public LoadTypeSet getLoadTypes() { return loadTypes; }

    /** Config callback: rebuild all defaults from the new FeederConfig generation. */
    public synchronized void configure(FeederConfig config) {
        // Any configuration after the first counts as a change.
        if (feederOptions != null) {
            setConfigChanged(true);
        }
        feederOptions = new FeederOptions(config);
        if (feederOptions.getRoute() != null) {
            defaultRoute = Route.parse(feederOptions.getRoute());
        } else {
            defaultRoute = null;
        }
        // Config timeout is in seconds; internal representation is milliseconds.
        defaultTimeoutMillis = (long) (feederOptions.getTimeout() * 1000);
        retryEnabled = feederOptions.getRetryEnabled();
        defaultAbortOnDocumentError = feederOptions.abortOnDocumentError();
        defaultAbortOnSendError = feederOptions.abortOnSendError();
        // Empty string means "no default chain".
        if (!"".equals(feederOptions.getDocprocChain())) {
            defaultDocprocChain = feederOptions.getDocprocChain();
        } else {
            defaultDocprocChain = null;
        }
        if (log.isLoggable(LogLevel.DEBUG)) {
            log.log(LogLevel.DEBUG, "Received new config (" +
                    "route: " + (defaultRoute != null ? defaultRoute : "<none>") +
                    ", timeout: " + defaultTimeoutMillis + " ms, retry enabled: " + retryEnabled +
                    ", docproc chain: " + (defaultDocprocChain != null ? defaultDocprocChain : "<none>") +
                    ", abort on doc error: " + defaultAbortOnDocumentError +
                    ", abort on feed error: " + defaultAbortOnSendError + ")");
        }
    }

    /**
     * Snapshot of per-request message properties, applied to each outgoing Message
     * or to VisitorParameters. Captures its creation time so the remaining time budget
     * can be computed against {@code totalTimeout}.
     */
    public class PropertySetter implements MessageProcessor {

        /** Route either set by configuration or by explicit request override. May be null */
        private Route route;
        /** Timeout (in milliseconds) */
        private long timeout;
        // Overall budget for the whole operation (ms); negative means "no total budget".
        private long totalTimeout;
        // Wall-clock time (ms) at construction; baseline for the total-timeout countdown.
        private long startTime;
        /** Explicit priority set. May be null */
        private DocumentProtocol.Priority priority;
        private boolean retryEnabled;
        private boolean abortOnDocumentError;
        private boolean abortOnFeedError;
        private boolean createIfNonExistent;
        private LoadType loadType;
        private int traceLevel;

        public PropertySetter(Route route, long timeout, long totalTimeout,
                              DocumentProtocol.Priority priority, LoadType loadType,
                              boolean retryEnabled, boolean abortOnDocumentError,
                              boolean abortOnFeedError, boolean createIfNonExistent,
                              int traceLevel) {
            this.route = route;
            this.timeout = timeout;
            this.totalTimeout = totalTimeout;
            this.priority = priority;
            this.loadType = loadType;
            this.retryEnabled = retryEnabled;
            this.abortOnDocumentError = abortOnDocumentError;
            this.abortOnFeedError = abortOnFeedError;
            this.createIfNonExistent = createIfNonExistent;
            this.traceLevel = traceLevel;
            this.startTime = SystemTimer.INSTANCE.milliTime();
        }

        // Per-message timeout, additionally clamped by what is left of the total budget.
        private long getTimeRemaining() {
            return (totalTimeout < 0L)
                    ? timeout
                    : Math.min(timeout, totalTimeout - (SystemTimer.INSTANCE.milliTime() - startTime));
        }

        public Route getRoute() { return route; }

        public void setRoute(Route route) { this.route = route; }

        public long getTimeout() { return timeout; }

        public void setTimeout(long timeout) { this.timeout = timeout; }

        public DocumentProtocol.Priority getPriority() { return priority; }

        public void setPriority(DocumentProtocol.Priority priority) { this.priority = priority; }

        public LoadType getLoadType() { return loadType; }

        public void setLoadType(LoadType loadType) { this.loadType = loadType; }

        public boolean getAbortOnDocumentError() { return abortOnDocumentError; }

        public boolean getAbortOnFeedError() { return abortOnFeedError; }

        public boolean getCreateIfNonExistent() { return createIfNonExistent; }

        /** Applies the captured properties to an outgoing message. Explicit priority wins over load-type priority. */
        @Override
        public void process(Message msg) {
            if (route != null) {
                msg.setRoute(route);
            }
            msg.setTimeRemaining(getTimeRemaining());
            msg.setRetryEnabled(retryEnabled);
            // The effective trace level is the max of the configured and requested level.
            msg.getTrace().setLevel(Math.max(getFeederOptions().getTraceLevel(), traceLevel));
            if (loadType != null) {
                ((DocumentMessage) msg).setLoadType(loadType);
                ((DocumentMessage) msg).setPriority(loadType.getPriority());
            }
            if (priority != null) {
                ((DocumentMessage) msg).setPriority(priority);
            }
        }

        /** Applies the captured properties to visitor parameters (note: uses the raw timeout, not the remaining budget). */
        public void process(VisitorParameters params) {
            if (route != null) {
                params.setRoute(route);
            }
            params.setTimeoutMs(timeout);
            params.setTraceLevel(Math.max(getFeederOptions().getTraceLevel(), traceLevel));
            if (loadType != null) {
                params.setLoadType(loadType);
                params.setPriority(loadType.getPriority());
            }
            if (priority != null) {
                params.setPriority(priority);
            }
        }
    }
}
Shouldn't this be `return ifExists(this::getAttributes);`? Although they do catch different exceptions.
public Optional<FileAttributes> getAttributesIfExists() { try { return Optional.of(getAttributes()); } catch (UncheckedIOException e) { if (e.getCause() instanceof NoSuchFileException) { return Optional.empty(); } throw e; } }
return Optional.of(getAttributes());
public Optional<FileAttributes> getAttributesIfExists() { return IOExceptionUtil.ifExists(() -> getAttributes()); }
/**
 * Convenience wrapper around a {@link Path} for POSIX-style file operations
 * (permissions, owner, group, UTF-8 content). Checked IOExceptions are converted
 * to unchecked via the {@code uncheck} helper — presumably a static import from a
 * sibling utility in this module (TODO confirm).
 */
class UnixPath {
    private final Path path;

    public UnixPath(Path path) {
        this.path = path;
    }

    public UnixPath(String path) {
        this(Paths.get(path));
    }

    public Path toPath() {
        return path;
    }

    /**
     * Creates any missing parent directories.
     *
     * @return true if at least one directory (potentially) had to be created, false if the
     *         parent already existed as a directory.
     */
    // NOTE(review): path.getParent() may be null (e.g. a root or single-element relative path);
    // confirm callers never pass such paths.
    public boolean createParents() {
        Path parent = path.getParent();
        if (Files.isDirectory(parent)) {
            return false;
        }
        uncheck(() -> Files.createDirectories(parent));
        return true;
    }

    /** Reads the whole file and decodes it as UTF-8. */
    public String readUtf8File() {
        byte[] byteContent = uncheck(() -> Files.readAllBytes(path));
        return new String(byteContent, StandardCharsets.UTF_8);
    }

    /** Writes the string as UTF-8, honoring the given open options (e.g. APPEND). */
    public void writeUtf8File(String content, OpenOption... options) {
        byte[] contentInUtf8 = content.getBytes(StandardCharsets.UTF_8);
        uncheck(() -> Files.write(path, contentInUtf8, options));
    }

    public String getPermissions() {
        return getAttributes().permissions();
    }

    /**
     * @param permissions Example: "rwxr-x---" means rwx for owner, rx for group,
     *                    and no permissions for others.
     */
    public void setPermissions(String permissions) {
        Set<PosixFilePermission> permissionSet;
        try {
            permissionSet = PosixFilePermissions.fromString(permissions);
        } catch (IllegalArgumentException e) {
            // Re-throw with the offending path for easier diagnosis.
            throw new IllegalArgumentException("Failed to set permissions '" + permissions + "' on path " + path, e);
        }
        uncheck(() -> Files.setPosixFilePermissions(path, permissionSet));
    }

    public String getOwner() {
        return getAttributes().owner();
    }

    /** Sets the file owner by user name, resolved through the path's file system. */
    public void setOwner(String owner) {
        UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
        UserPrincipal principal = uncheck(() -> service.lookupPrincipalByName(owner));
        uncheck(() -> Files.setOwner(path, principal));
    }

    public String getGroup() {
        return getAttributes().group();
    }

    /** Sets the file group by group name, resolved through the path's file system. */
    public void setGroup(String group) {
        UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
        GroupPrincipal principal = uncheck(() -> service.lookupPrincipalByGroupName(group));
        uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).setGroup(principal));
    }

    public Instant getLastModifiedTime() {
        return getAttributes().lastModifiedTime();
    }

    /** Reads the POSIX attributes of the file and wraps them in this module's FileAttributes. */
    public FileAttributes getAttributes() {
        PosixFileAttributes attributes = uncheck(() ->
                Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes());
        return new FileAttributes(attributes);
    }

    @Override
    public String toString() {
        return path.toString();
    }
}
/**
 * Convenience wrapper around a {@link Path} for POSIX-style file operations
 * (permissions, owner, group, UTF-8 content). Checked IOExceptions are converted
 * to unchecked via the {@code uncheck} helper — presumably a static import from a
 * sibling utility in this module (TODO confirm).
 */
class UnixPath {
    private final Path path;

    public UnixPath(Path path) {
        this.path = path;
    }

    public UnixPath(String path) {
        this(Paths.get(path));
    }

    public Path toPath() {
        return path;
    }

    /**
     * Creates any missing parent directories.
     *
     * @return true if at least one directory (potentially) had to be created, false if the
     *         parent already existed as a directory.
     */
    // NOTE(review): path.getParent() may be null (e.g. a root or single-element relative path);
    // confirm callers never pass such paths.
    public boolean createParents() {
        Path parent = path.getParent();
        if (Files.isDirectory(parent)) {
            return false;
        }
        uncheck(() -> Files.createDirectories(parent));
        return true;
    }

    /** Reads the whole file and decodes it as UTF-8. */
    public String readUtf8File() {
        byte[] byteContent = uncheck(() -> Files.readAllBytes(path));
        return new String(byteContent, StandardCharsets.UTF_8);
    }

    /** Writes the string as UTF-8, honoring the given open options (e.g. APPEND). */
    public void writeUtf8File(String content, OpenOption... options) {
        byte[] contentInUtf8 = content.getBytes(StandardCharsets.UTF_8);
        uncheck(() -> Files.write(path, contentInUtf8, options));
    }

    public String getPermissions() {
        return getAttributes().permissions();
    }

    /**
     * @param permissions Example: "rwxr-x---" means rwx for owner, rx for group,
     *                    and no permissions for others.
     */
    public void setPermissions(String permissions) {
        Set<PosixFilePermission> permissionSet;
        try {
            permissionSet = PosixFilePermissions.fromString(permissions);
        } catch (IllegalArgumentException e) {
            // Re-throw with the offending path for easier diagnosis.
            throw new IllegalArgumentException("Failed to set permissions '" + permissions + "' on path " + path, e);
        }
        uncheck(() -> Files.setPosixFilePermissions(path, permissionSet));
    }

    public String getOwner() {
        return getAttributes().owner();
    }

    /** Sets the file owner by user name, resolved through the path's file system. */
    public void setOwner(String owner) {
        UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
        UserPrincipal principal = uncheck(() -> service.lookupPrincipalByName(owner));
        uncheck(() -> Files.setOwner(path, principal));
    }

    public String getGroup() {
        return getAttributes().group();
    }

    /** Sets the file group by group name, resolved through the path's file system. */
    public void setGroup(String group) {
        UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
        GroupPrincipal principal = uncheck(() -> service.lookupPrincipalByGroupName(group));
        uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).setGroup(principal));
    }

    public Instant getLastModifiedTime() {
        return getAttributes().lastModifiedTime();
    }

    /** Reads the POSIX attributes of the file and wraps them in this module's FileAttributes. */
    public FileAttributes getAttributes() {
        PosixFileAttributes attributes = uncheck(() ->
                Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes());
        return new FileAttributes(attributes);
    }

    @Override
    public String toString() {
        return path.toString();
    }
}
Thanks! Looks like I forgot to switch.
public Optional<FileAttributes> getAttributesIfExists() { try { return Optional.of(getAttributes()); } catch (UncheckedIOException e) { if (e.getCause() instanceof NoSuchFileException) { return Optional.empty(); } throw e; } }
return Optional.of(getAttributes());
public Optional<FileAttributes> getAttributesIfExists() { return IOExceptionUtil.ifExists(() -> getAttributes()); }
/**
 * Convenience wrapper around a {@link Path} for POSIX-style file operations
 * (permissions, owner, group, UTF-8 content). Checked IOExceptions are converted
 * to unchecked via the {@code uncheck} helper — presumably a static import from a
 * sibling utility in this module (TODO confirm).
 */
class UnixPath {
    private final Path path;

    public UnixPath(Path path) {
        this.path = path;
    }

    public UnixPath(String path) {
        this(Paths.get(path));
    }

    public Path toPath() {
        return path;
    }

    /**
     * Creates any missing parent directories.
     *
     * @return true if at least one directory (potentially) had to be created, false if the
     *         parent already existed as a directory.
     */
    // NOTE(review): path.getParent() may be null (e.g. a root or single-element relative path);
    // confirm callers never pass such paths.
    public boolean createParents() {
        Path parent = path.getParent();
        if (Files.isDirectory(parent)) {
            return false;
        }
        uncheck(() -> Files.createDirectories(parent));
        return true;
    }

    /** Reads the whole file and decodes it as UTF-8. */
    public String readUtf8File() {
        byte[] byteContent = uncheck(() -> Files.readAllBytes(path));
        return new String(byteContent, StandardCharsets.UTF_8);
    }

    /** Writes the string as UTF-8, honoring the given open options (e.g. APPEND). */
    public void writeUtf8File(String content, OpenOption... options) {
        byte[] contentInUtf8 = content.getBytes(StandardCharsets.UTF_8);
        uncheck(() -> Files.write(path, contentInUtf8, options));
    }

    public String getPermissions() {
        return getAttributes().permissions();
    }

    /**
     * @param permissions Example: "rwxr-x---" means rwx for owner, rx for group,
     *                    and no permissions for others.
     */
    public void setPermissions(String permissions) {
        Set<PosixFilePermission> permissionSet;
        try {
            permissionSet = PosixFilePermissions.fromString(permissions);
        } catch (IllegalArgumentException e) {
            // Re-throw with the offending path for easier diagnosis.
            throw new IllegalArgumentException("Failed to set permissions '" + permissions + "' on path " + path, e);
        }
        uncheck(() -> Files.setPosixFilePermissions(path, permissionSet));
    }

    public String getOwner() {
        return getAttributes().owner();
    }

    /** Sets the file owner by user name, resolved through the path's file system. */
    public void setOwner(String owner) {
        UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
        UserPrincipal principal = uncheck(() -> service.lookupPrincipalByName(owner));
        uncheck(() -> Files.setOwner(path, principal));
    }

    public String getGroup() {
        return getAttributes().group();
    }

    /** Sets the file group by group name, resolved through the path's file system. */
    public void setGroup(String group) {
        UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
        GroupPrincipal principal = uncheck(() -> service.lookupPrincipalByGroupName(group));
        uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).setGroup(principal));
    }

    public Instant getLastModifiedTime() {
        return getAttributes().lastModifiedTime();
    }

    /** Reads the POSIX attributes of the file and wraps them in this module's FileAttributes. */
    public FileAttributes getAttributes() {
        PosixFileAttributes attributes = uncheck(() ->
                Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes());
        return new FileAttributes(attributes);
    }

    @Override
    public String toString() {
        return path.toString();
    }
}
/**
 * Convenience wrapper around a {@link Path} for POSIX-style file operations
 * (permissions, owner, group, UTF-8 content). Checked IOExceptions are converted
 * to unchecked via the {@code uncheck} helper — presumably a static import from a
 * sibling utility in this module (TODO confirm).
 */
class UnixPath {
    private final Path path;

    public UnixPath(Path path) {
        this.path = path;
    }

    public UnixPath(String path) {
        this(Paths.get(path));
    }

    public Path toPath() {
        return path;
    }

    /**
     * Creates any missing parent directories.
     *
     * @return true if at least one directory (potentially) had to be created, false if the
     *         parent already existed as a directory.
     */
    // NOTE(review): path.getParent() may be null (e.g. a root or single-element relative path);
    // confirm callers never pass such paths.
    public boolean createParents() {
        Path parent = path.getParent();
        if (Files.isDirectory(parent)) {
            return false;
        }
        uncheck(() -> Files.createDirectories(parent));
        return true;
    }

    /** Reads the whole file and decodes it as UTF-8. */
    public String readUtf8File() {
        byte[] byteContent = uncheck(() -> Files.readAllBytes(path));
        return new String(byteContent, StandardCharsets.UTF_8);
    }

    /** Writes the string as UTF-8, honoring the given open options (e.g. APPEND). */
    public void writeUtf8File(String content, OpenOption... options) {
        byte[] contentInUtf8 = content.getBytes(StandardCharsets.UTF_8);
        uncheck(() -> Files.write(path, contentInUtf8, options));
    }

    public String getPermissions() {
        return getAttributes().permissions();
    }

    /**
     * @param permissions Example: "rwxr-x---" means rwx for owner, rx for group,
     *                    and no permissions for others.
     */
    public void setPermissions(String permissions) {
        Set<PosixFilePermission> permissionSet;
        try {
            permissionSet = PosixFilePermissions.fromString(permissions);
        } catch (IllegalArgumentException e) {
            // Re-throw with the offending path for easier diagnosis.
            throw new IllegalArgumentException("Failed to set permissions '" + permissions + "' on path " + path, e);
        }
        uncheck(() -> Files.setPosixFilePermissions(path, permissionSet));
    }

    public String getOwner() {
        return getAttributes().owner();
    }

    /** Sets the file owner by user name, resolved through the path's file system. */
    public void setOwner(String owner) {
        UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
        UserPrincipal principal = uncheck(() -> service.lookupPrincipalByName(owner));
        uncheck(() -> Files.setOwner(path, principal));
    }

    public String getGroup() {
        return getAttributes().group();
    }

    /** Sets the file group by group name, resolved through the path's file system. */
    public void setGroup(String group) {
        UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
        GroupPrincipal principal = uncheck(() -> service.lookupPrincipalByGroupName(group));
        uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).setGroup(principal));
    }

    public Instant getLastModifiedTime() {
        return getAttributes().lastModifiedTime();
    }

    /** Reads the POSIX attributes of the file and wraps them in this module's FileAttributes. */
    public FileAttributes getAttributes() {
        PosixFileAttributes attributes = uncheck(() ->
                Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes());
        return new FileAttributes(attributes);
    }

    @Override
    public String toString() {
        return path.toString();
    }
}
Maybe extract a package-private method for this, and use it in VespaAccessLog as well?
private static String getNormalizedURI(String rawPath, String rawQuery) { return rawQuery != null ? rawPath + "?" + rawQuery : rawPath; }
return rawQuery != null ? rawPath + "?" + rawQuery : rawPath;
private static String getNormalizedURI(String rawPath, String rawQuery) { return rawQuery != null ? rawPath + "?" + rawQuery : rawPath; }
/**
 * Formats an {@link AccessLogEntry} as a single-line Vespa JSON access-log record.
 *
 * Fixes relative to the previous revision:
 * - durationAsSeconds capped the duration with {@code new BigDecimal(0xffffffff)}; that hex
 *   literal is an int equal to -1, so over-long durations were logged as -1.000 instead of
 *   the intended 4294967295.000. Now uses the long literal 0xffffffffL.
 * - durationAsSeconds used the deprecated int constant BigDecimal.ROUND_HALF_UP while
 *   toTimestampInSeconds already used RoundingMode.HALF_UP; both now use RoundingMode.
 */
class JSONFormatter {

    private final AccessLogEntry accessLogEntry;
    private final JsonFactory generatorFactory;

    private static final Logger logger = Logger.getLogger(JSONFormatter.class.getName());

    public JSONFormatter(final AccessLogEntry entry) {
        accessLogEntry = entry;
        generatorFactory = new JsonFactory();
        generatorFactory.setCodec(new ObjectMapper());
    }

    /**
     * The main method for formatting the associated {@link AccessLogEntry} as a Vespa JSON access log string
     *
     * @return The Vespa JSON access log string without trailing newline
     */
    public String format() {
        ByteArrayOutputStream logLine = new ByteArrayOutputStream();
        try {
            JsonGenerator generator = generatorFactory.createGenerator(logLine, JsonEncoding.UTF8);
            generator.writeStartObject();
            generator.writeStringField("ip", accessLogEntry.getIpV4Address());
            generator.writeNumberField("time", toTimestampInSeconds(accessLogEntry.getTimeStampMillis()));
            generator.writeNumberField("duration",
                    durationAsSeconds(accessLogEntry.getDurationBetweenRequestResponseMillis()));
            generator.writeNumberField("responsesize", accessLogEntry.getReturnedContentSize());
            generator.writeNumberField("code", accessLogEntry.getStatusCode());
            generator.writeStringField("method", accessLogEntry.getHttpMethod());
            generator.writeStringField("uri",
                    getNormalizedURI(accessLogEntry.getRawPath(), accessLogEntry.getRawQuery().orElse(null)));
            generator.writeStringField("version", accessLogEntry.getHttpVersion());
            generator.writeStringField("agent", accessLogEntry.getUserAgent());
            generator.writeStringField("host", accessLogEntry.getHostString());
            generator.writeStringField("scheme", accessLogEntry.getScheme());
            generator.writeNumberField("localport", accessLogEntry.getLocalPort());

            // Principals are optional; only emitted when present.
            Principal principal = accessLogEntry.getUserPrincipal();
            if (principal != null) {
                generator.writeStringField("user-principal", principal.getName());
            }
            Principal sslPrincipal = accessLogEntry.getSslPrincipal();
            if (sslPrincipal != null) {
                generator.writeStringField("ssl-principal", sslPrincipal.getName());
            }

            // Only log remote address/port if different from the physical peer connection.
            if (remoteAddressDiffers(accessLogEntry.getIpV4Address(), accessLogEntry.getRemoteAddress())) {
                generator.writeStringField("remoteaddr", accessLogEntry.getRemoteAddress());
                if (accessLogEntry.getRemotePort() > 0) {
                    generator.writeNumberField("remoteport", accessLogEntry.getRemotePort());
                }
            }

            if (accessLogEntry.getPeerAddress() != null) {
                generator.writeStringField("peeraddr", accessLogEntry.getPeerAddress());
                int peerPort = accessLogEntry.getPeerPort();
                if (peerPort > 0 && peerPort != accessLogEntry.getRemotePort()) {
                    generator.writeNumberField("peerport", peerPort);
                }
            }

            // Hit counts are only present for search requests.
            if (isSearchRequest(accessLogEntry)) {
                generator.writeObjectFieldStart("search");
                generator.writeNumberField("totalhits", getTotalHitCount(accessLogEntry.getHitCounts()));
                generator.writeNumberField("hits", getRetrievedHitCount(accessLogEntry.getHitCounts()));
                generator.writeEndObject();
            }

            // Arbitrary key/value attributes; single values are scalars, multi-values become arrays.
            Map<String, List<String>> keyValues = accessLogEntry.getKeyValues();
            if (keyValues != null && !keyValues.isEmpty()) {
                generator.writeObjectFieldStart("attributes");
                for (Map.Entry<String, List<String>> entry : keyValues.entrySet()) {
                    if (entry.getValue().size() == 1) {
                        generator.writeStringField(entry.getKey(), entry.getValue().get(0));
                    } else {
                        generator.writeFieldName(entry.getKey());
                        generator.writeStartArray();
                        for (String s : entry.getValue()) {
                            generator.writeString(s);
                        }
                        generator.writeEndArray();
                    }
                }
                generator.writeEndObject();
            }

            generator.writeEndObject();
            generator.close();
        } catch (IOException e) {
            logger.log(Level.WARNING, "Unable to generate JSON access log entry: " + e.getMessage());
        }
        // NOTE(review): the generator wrote UTF-8, but toString() decodes with the platform
        // default charset — confirm the JVM always runs with UTF-8 here.
        return logLine.toString();
    }

    /** True when a remote address is set and differs from the physical peer address. */
    private boolean remoteAddressDiffers(String ipV4Address, String remoteAddress) {
        return remoteAddress != null && !Objects.equals(ipV4Address, remoteAddress);
    }

    /** A request is considered a search request when hit counts were recorded. */
    private boolean isSearchRequest(AccessLogEntry logEntry) {
        return logEntry != null && (logEntry.getHitCounts() != null);
    }

    private long getTotalHitCount(HitCounts counts) {
        if (counts == null) {
            return 0;
        }
        return counts.getTotalHitCount();
    }

    private int getRetrievedHitCount(HitCounts counts) {
        if (counts == null) {
            return 0;
        }
        return counts.getRetrievedHitCount();
    }

    /**
     * Converts epoch milliseconds to seconds with 3 decimals.
     * NOTE(review): the 2038 wrap-around handling below (remainder of Integer.MAX_VALUE)
     * looks like a legacy-format constraint — confirm before changing.
     */
    private BigDecimal toTimestampInSeconds(long numMillisSince1Jan1970AtMidnightUTC) {
        BigDecimal timestampInSeconds =
                new BigDecimal(numMillisSince1Jan1970AtMidnightUTC).divide(BigDecimal.valueOf(1000));
        if (numMillisSince1Jan1970AtMidnightUTC / 1000 > 0x7fffffff) {
            logger.log(Level.WARNING, "A year 2038 problem occurred.");
            logger.log(Level.INFO, "numMillisSince1Jan1970AtMidnightUTC: " + numMillisSince1Jan1970AtMidnightUTC);
            timestampInSeconds = new BigDecimal(numMillisSince1Jan1970AtMidnightUTC)
                    .divide(BigDecimal.valueOf(1000))
                    .remainder(BigDecimal.valueOf(0x7fffffff));
        }
        return timestampInSeconds.setScale(3, RoundingMode.HALF_UP);
    }

    /** Converts a duration in milliseconds to seconds with 3 decimals, capped at 2^32-1 seconds. */
    private BigDecimal durationAsSeconds(long timeInMillis) {
        BigDecimal duration = new BigDecimal(timeInMillis).divide(BigDecimal.valueOf(1000));
        if (timeInMillis > 0xffffffffL) {
            logger.log(Level.WARNING, "Duration too long: " + timeInMillis);
            // Bug fix: 0xffffffff (int) is -1; the long literal gives the intended cap 4294967295.
            duration = new BigDecimal(0xffffffffL);
        }
        return duration.setScale(3, RoundingMode.HALF_UP);
    }
}
/**
 * Formats an {@link AccessLogEntry} as a single-line Vespa JSON access-log record.
 *
 * Fixes relative to the previous revision:
 * - durationAsSeconds capped the duration with {@code new BigDecimal(0xffffffff)}; that hex
 *   literal is an int equal to -1, so over-long durations were logged as -1.000 instead of
 *   the intended 4294967295.000. Now uses the long literal 0xffffffffL.
 * - durationAsSeconds used the deprecated int constant BigDecimal.ROUND_HALF_UP while
 *   toTimestampInSeconds already used RoundingMode.HALF_UP; both now use RoundingMode.
 */
class JSONFormatter {

    private final AccessLogEntry accessLogEntry;
    private final JsonFactory generatorFactory;

    private static final Logger logger = Logger.getLogger(JSONFormatter.class.getName());

    public JSONFormatter(final AccessLogEntry entry) {
        accessLogEntry = entry;
        generatorFactory = new JsonFactory();
        generatorFactory.setCodec(new ObjectMapper());
    }

    /**
     * The main method for formatting the associated {@link AccessLogEntry} as a Vespa JSON access log string
     *
     * @return The Vespa JSON access log string without trailing newline
     */
    public String format() {
        ByteArrayOutputStream logLine = new ByteArrayOutputStream();
        try {
            JsonGenerator generator = generatorFactory.createGenerator(logLine, JsonEncoding.UTF8);
            generator.writeStartObject();
            generator.writeStringField("ip", accessLogEntry.getIpV4Address());
            generator.writeNumberField("time", toTimestampInSeconds(accessLogEntry.getTimeStampMillis()));
            generator.writeNumberField("duration",
                    durationAsSeconds(accessLogEntry.getDurationBetweenRequestResponseMillis()));
            generator.writeNumberField("responsesize", accessLogEntry.getReturnedContentSize());
            generator.writeNumberField("code", accessLogEntry.getStatusCode());
            generator.writeStringField("method", accessLogEntry.getHttpMethod());
            generator.writeStringField("uri",
                    getNormalizedURI(accessLogEntry.getRawPath(), accessLogEntry.getRawQuery().orElse(null)));
            generator.writeStringField("version", accessLogEntry.getHttpVersion());
            generator.writeStringField("agent", accessLogEntry.getUserAgent());
            generator.writeStringField("host", accessLogEntry.getHostString());
            generator.writeStringField("scheme", accessLogEntry.getScheme());
            generator.writeNumberField("localport", accessLogEntry.getLocalPort());

            // Principals are optional; only emitted when present.
            Principal principal = accessLogEntry.getUserPrincipal();
            if (principal != null) {
                generator.writeStringField("user-principal", principal.getName());
            }
            Principal sslPrincipal = accessLogEntry.getSslPrincipal();
            if (sslPrincipal != null) {
                generator.writeStringField("ssl-principal", sslPrincipal.getName());
            }

            // Only log remote address/port if different from the physical peer connection.
            if (remoteAddressDiffers(accessLogEntry.getIpV4Address(), accessLogEntry.getRemoteAddress())) {
                generator.writeStringField("remoteaddr", accessLogEntry.getRemoteAddress());
                if (accessLogEntry.getRemotePort() > 0) {
                    generator.writeNumberField("remoteport", accessLogEntry.getRemotePort());
                }
            }

            if (accessLogEntry.getPeerAddress() != null) {
                generator.writeStringField("peeraddr", accessLogEntry.getPeerAddress());
                int peerPort = accessLogEntry.getPeerPort();
                if (peerPort > 0 && peerPort != accessLogEntry.getRemotePort()) {
                    generator.writeNumberField("peerport", peerPort);
                }
            }

            // Hit counts are only present for search requests.
            if (isSearchRequest(accessLogEntry)) {
                generator.writeObjectFieldStart("search");
                generator.writeNumberField("totalhits", getTotalHitCount(accessLogEntry.getHitCounts()));
                generator.writeNumberField("hits", getRetrievedHitCount(accessLogEntry.getHitCounts()));
                generator.writeEndObject();
            }

            // Arbitrary key/value attributes; single values are scalars, multi-values become arrays.
            Map<String, List<String>> keyValues = accessLogEntry.getKeyValues();
            if (keyValues != null && !keyValues.isEmpty()) {
                generator.writeObjectFieldStart("attributes");
                for (Map.Entry<String, List<String>> entry : keyValues.entrySet()) {
                    if (entry.getValue().size() == 1) {
                        generator.writeStringField(entry.getKey(), entry.getValue().get(0));
                    } else {
                        generator.writeFieldName(entry.getKey());
                        generator.writeStartArray();
                        for (String s : entry.getValue()) {
                            generator.writeString(s);
                        }
                        generator.writeEndArray();
                    }
                }
                generator.writeEndObject();
            }

            generator.writeEndObject();
            generator.close();
        } catch (IOException e) {
            logger.log(Level.WARNING, "Unable to generate JSON access log entry: " + e.getMessage());
        }
        // NOTE(review): the generator wrote UTF-8, but toString() decodes with the platform
        // default charset — confirm the JVM always runs with UTF-8 here.
        return logLine.toString();
    }

    /** True when a remote address is set and differs from the physical peer address. */
    private boolean remoteAddressDiffers(String ipV4Address, String remoteAddress) {
        return remoteAddress != null && !Objects.equals(ipV4Address, remoteAddress);
    }

    /** A request is considered a search request when hit counts were recorded. */
    private boolean isSearchRequest(AccessLogEntry logEntry) {
        return logEntry != null && (logEntry.getHitCounts() != null);
    }

    private long getTotalHitCount(HitCounts counts) {
        if (counts == null) {
            return 0;
        }
        return counts.getTotalHitCount();
    }

    private int getRetrievedHitCount(HitCounts counts) {
        if (counts == null) {
            return 0;
        }
        return counts.getRetrievedHitCount();
    }

    /**
     * Converts epoch milliseconds to seconds with 3 decimals.
     * NOTE(review): the 2038 wrap-around handling below (remainder of Integer.MAX_VALUE)
     * looks like a legacy-format constraint — confirm before changing.
     */
    private BigDecimal toTimestampInSeconds(long numMillisSince1Jan1970AtMidnightUTC) {
        BigDecimal timestampInSeconds =
                new BigDecimal(numMillisSince1Jan1970AtMidnightUTC).divide(BigDecimal.valueOf(1000));
        if (numMillisSince1Jan1970AtMidnightUTC / 1000 > 0x7fffffff) {
            logger.log(Level.WARNING, "A year 2038 problem occurred.");
            logger.log(Level.INFO, "numMillisSince1Jan1970AtMidnightUTC: " + numMillisSince1Jan1970AtMidnightUTC);
            timestampInSeconds = new BigDecimal(numMillisSince1Jan1970AtMidnightUTC)
                    .divide(BigDecimal.valueOf(1000))
                    .remainder(BigDecimal.valueOf(0x7fffffff));
        }
        return timestampInSeconds.setScale(3, RoundingMode.HALF_UP);
    }

    /** Converts a duration in milliseconds to seconds with 3 decimals, capped at 2^32-1 seconds. */
    private BigDecimal durationAsSeconds(long timeInMillis) {
        BigDecimal duration = new BigDecimal(timeInMillis).divide(BigDecimal.valueOf(1000));
        if (timeInMillis > 0xffffffffL) {
            logger.log(Level.WARNING, "Duration too long: " + timeInMillis);
            // Bug fix: 0xffffffff (int) is -1; the long literal gives the intended cap 4294967295.
            duration = new BigDecimal(0xffffffffL);
        }
        return duration.setScale(3, RoundingMode.HALF_UP);
    }
}
"RequetLog" -> "RequestLog"
public void log(final Request request, final Response response) { try { final AccessLogEntry accessLogEntryFromServletRequest = (AccessLogEntry) request.getAttribute( JDiscHttpServlet.ATTRIBUTE_NAME_ACCESS_LOG_ENTRY); final AccessLogEntry accessLogEntry; if (accessLogEntryFromServletRequest != null) { accessLogEntry = accessLogEntryFromServletRequest; } else { accessLogEntry = new AccessLogEntry(); populateAccessLogEntryFromHttpServletRequest(request, accessLogEntry); } final long startTime = request.getTimeStamp(); final long endTime = System.currentTimeMillis(); accessLogEntry.setTimeStamp(startTime); accessLogEntry.setDurationBetweenRequestResponse(endTime - startTime); accessLogEntry.setReturnedContentSize(response.getContentCount()); accessLogEntry.setStatusCode(response.getStatus()); accessLog.log(accessLogEntry); } catch (Exception e) { logger.log(Level.SEVERE, "Failed to log access log entry: " + e.getMessage(), e); } }
public void log(final Request request, final Response response) { try { final AccessLogEntry accessLogEntryFromServletRequest = (AccessLogEntry) request.getAttribute( JDiscHttpServlet.ATTRIBUTE_NAME_ACCESS_LOG_ENTRY); final AccessLogEntry accessLogEntry; if (accessLogEntryFromServletRequest != null) { accessLogEntry = accessLogEntryFromServletRequest; } else { accessLogEntry = new AccessLogEntry(); populateAccessLogEntryFromHttpServletRequest(request, accessLogEntry); } final long startTime = request.getTimeStamp(); final long endTime = System.currentTimeMillis(); accessLogEntry.setTimeStamp(startTime); accessLogEntry.setDurationBetweenRequestResponse(endTime - startTime); accessLogEntry.setReturnedContentSize(response.getContentCount()); accessLogEntry.setStatusCode(response.getStatus()); accessLog.log(accessLogEntry); } catch (Exception e) { logger.log(Level.SEVERE, "Failed to log access log entry: " + e.getMessage(), e); } }
/**
 * Jetty {@link RequestLog} implementation that translates Jetty request/response
 * objects into this module's {@link AccessLogEntry} model and forwards them to
 * an {@link AccessLog}. Also knows the proxy headers used to recover the original
 * client address/port behind load balancers.
 */
class AccessLogRequestLog extends AbstractLifeCycle implements RequestLog {

    private static final Logger logger = Logger.getLogger(AccessLogRequestLog.class.getName());

    // Proxy-supplied headers carrying the original client address (y-ra) and port (y-rp),
    // plus legacy/alternative spellings checked in precedence order in getRemoteAddress().
    private static final String HEADER_NAME_Y_RA = "y-ra";
    private static final String HEADER_NAME_Y_RP = "y-rp";
    private static final String HEADER_NAME_YAHOOREMOTEIP = "yahooremoteip";
    private static final String HEADER_NAME_X_FORWARDED_FOR = "x-forwarded-for";
    private static final String HEADER_NAME_CLIENT_IP = "client-ip";

    private final AccessLog accessLog;

    public AccessLogRequestLog(final AccessLog accessLog) {
        this.accessLog = accessLog;
    }

    // NOTE(review): this @Override appears orphaned in this excerpt — it presumably annotated a
    // log(Request, Response) override that is not shown here; confirm against the full file.
    @Override
    /*
     * Collecting all log entry population based on extracting information from HttpServletRequest in one method
     * means that this may easily be moved to another location, e.g. if we want to populate this at instantiation
     * time rather than at logging time. We may, for example, want to set things such as http headers and ip
     * addresses up-front and make it illegal for request handlers to modify these later.
     */
    public static void populateAccessLogEntryFromHttpServletRequest(
            final HttpServletRequest request,
            final AccessLogEntry accessLogEntry) {
        final String quotedPath = request.getRequestURI();
        final String quotedQuery = request.getQueryString();
        // First try the raw path?query as-is; fall back to a decode-and-rebuild on syntax errors.
        try {
            final StringBuilder uriBuffer = new StringBuilder();
            uriBuffer.append(quotedPath);
            if (quotedQuery != null) {
                uriBuffer.append('?').append(quotedQuery);
            }
            final URI uri = new URI(uriBuffer.toString());
            accessLogEntry.setURI(uri);
        } catch (URISyntaxException e) {
            setUriFromMalformedInput(accessLogEntry, quotedPath, quotedQuery);
        }

        // "remote" = original client as reported by proxy headers; "peer" = physical TCP peer.
        final String remoteAddress = getRemoteAddress(request);
        final int remotePort = getRemotePort(request);
        final String peerAddress = request.getRemoteAddr();
        final int peerPort = request.getRemotePort();

        accessLogEntry.setUserAgent(request.getHeader("User-Agent"));
        accessLogEntry.setHttpMethod(request.getMethod());
        accessLogEntry.setHostString(request.getHeader("Host"));
        accessLogEntry.setReferer(request.getHeader("Referer"));
        accessLogEntry.setIpV4Address(peerAddress);
        accessLogEntry.setRemoteAddress(remoteAddress);
        accessLogEntry.setRemotePort(remotePort);
        // Peer details are only recorded when they differ from the (possibly proxied) remote ones.
        if (!Objects.equal(remoteAddress, peerAddress)) {
            accessLogEntry.setPeerAddress(peerAddress);
        }
        if (remotePort != peerPort) {
            accessLogEntry.setPeerPort(peerPort);
        }
        accessLogEntry.setHttpVersion(request.getProtocol());
        accessLogEntry.setScheme(request.getScheme());
        accessLogEntry.setLocalPort(request.getLocalPort());
        Principal principal = (Principal) request.getAttribute(ServletRequest.JDISC_REQUEST_PRINCIPAL);
        if (principal != null) {
            accessLogEntry.setUserPrincipal(principal);
        }
        // Standard servlet attribute for the client certificate chain; first cert is the client's.
        X509Certificate[] clientCert =
                (X509Certificate[]) request.getAttribute("javax.servlet.request.X509Certificate");
        if (clientCert != null && clientCert.length > 0) {
            accessLogEntry.setSslPrincipal(clientCert[0].getSubjectX500Principal());
        }
    }

    /** Original client address: proxy headers in precedence order, falling back to the TCP peer. */
    private static String getRemoteAddress(final HttpServletRequest request) {
        return Alternative.preferred(request.getHeader(HEADER_NAME_Y_RA))
                .alternatively(() -> request.getHeader(HEADER_NAME_YAHOOREMOTEIP))
                .alternatively(() -> request.getHeader(HEADER_NAME_X_FORWARDED_FOR))
                .alternatively(() -> request.getHeader(HEADER_NAME_CLIENT_IP))
                .orElseGet(request::getRemoteAddr);
    }

    /** Original client port from the y-rp header, falling back to the TCP peer port. */
    private static int getRemotePort(final HttpServletRequest request) {
        return Optional.ofNullable(request.getHeader(HEADER_NAME_Y_RP))
                .map(Integer::valueOf)
                .orElseGet(request::getRemotePort);
    }

    /** Fallback URI construction: percent-decode path and query, then let URI re-encode them. */
    private static void setUriFromMalformedInput(final AccessLogEntry accessLogEntry,
                                                 final String quotedPath,
                                                 final String quotedQuery) {
        try {
            final String scheme = null;
            final String authority = null;
            final String fragment = null;
            final URI uri = new URI(scheme, authority, unquote(quotedPath), unquote(quotedQuery), fragment);
            accessLogEntry.setURI(uri);
        } catch (URISyntaxException e) {
            // Give up on the URI but keep logging the rest of the entry.
            logger.log(Level.WARNING, "Could not convert String URI to URI object", e);
        }
    }

    /**
     * Percent-decodes the input as UTF-8; returns the input unchanged when it is not
     * valid percent-encoding, and null for null input.
     */
    private static String unquote(final String quotedQuery) {
        if (quotedQuery == null) {
            return null;
        }
        try {
            return URLDecoder.decode(quotedQuery, StandardCharsets.UTF_8.name());
        } catch (IllegalArgumentException e) {
            // Malformed escapes: keep the raw string rather than dropping the value.
            return quotedQuery;
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is guaranteed by the JLS; this cannot happen.
            throw new RuntimeException(e);
        }
    }
}
// Jetty request-log adapter that forwards per-request data to the JDisc AccessLog.
class AccessLogRequestLog extends AbstractLifeCycle implements RequestLog {

    private static final Logger logger = Logger.getLogger(AccessLogRequestLog.class.getName());

    // Proxy headers that may carry the original client address/port; checked in this order.
    private static final String HEADER_NAME_Y_RA = "y-ra";
    private static final String HEADER_NAME_Y_RP = "y-rp";
    private static final String HEADER_NAME_YAHOOREMOTEIP = "yahooremoteip";
    private static final String HEADER_NAME_X_FORWARDED_FOR = "x-forwarded-for";
    private static final String HEADER_NAME_CLIENT_IP = "client-ip";

    // Sink for completed access-log entries.
    private final AccessLog accessLog;

    public AccessLogRequestLog(final AccessLog accessLog) {
        this.accessLog = accessLog;
    }

    @Override
    /*
     * Collecting all log entry population based on extracting information from HttpServletRequest in one method
     * means that this may easily be moved to another location, e.g. if we want to populate this at instantiation
     * time rather than at logging time. We may, for example, want to set things such as http headers and ip
     * addresses up-front and make it illegal for request handlers to modify these later.
     */
    public static void populateAccessLogEntryFromHttpServletRequest(
            final HttpServletRequest request,
            final AccessLogEntry accessLogEntry) {
        final String quotedPath = request.getRequestURI();
        final String quotedQuery = request.getQueryString();
        try {
            final StringBuilder uriBuffer = new StringBuilder();
            uriBuffer.append(quotedPath);
            if (quotedQuery != null) {
                uriBuffer.append('?').append(quotedQuery);
            }
            final URI uri = new URI(uriBuffer.toString());
            accessLogEntry.setURI(uri);
        } catch (URISyntaxException e) {
            // Raw request line is not a valid URI; fall back to decoding path/query separately.
            setUriFromMalformedInput(accessLogEntry, quotedPath, quotedQuery);
        }

        // "remote" = client as reported by trusted proxy headers; "peer" = the direct connection.
        final String remoteAddress = getRemoteAddress(request);
        final int remotePort = getRemotePort(request);
        final String peerAddress = request.getRemoteAddr();
        final int peerPort = request.getRemotePort();

        accessLogEntry.setUserAgent(request.getHeader("User-Agent"));
        accessLogEntry.setHttpMethod(request.getMethod());
        accessLogEntry.setHostString(request.getHeader("Host"));
        accessLogEntry.setReferer(request.getHeader("Referer"));
        accessLogEntry.setIpV4Address(peerAddress);
        accessLogEntry.setRemoteAddress(remoteAddress);
        accessLogEntry.setRemotePort(remotePort);
        // Record the peer endpoint only where it differs from the proxy-reported remote endpoint.
        if (!Objects.equal(remoteAddress, peerAddress)) {
            accessLogEntry.setPeerAddress(peerAddress);
        }
        if (remotePort != peerPort) {
            accessLogEntry.setPeerPort(peerPort);
        }
        accessLogEntry.setHttpVersion(request.getProtocol());
        accessLogEntry.setScheme(request.getScheme());
        accessLogEntry.setLocalPort(request.getLocalPort());
        Principal principal = (Principal) request.getAttribute(ServletRequest.JDISC_REQUEST_PRINCIPAL);
        if (principal != null) {
            accessLogEntry.setUserPrincipal(principal);
        }
        X509Certificate[] clientCert = (X509Certificate[]) request.getAttribute("javax.servlet.request.X509Certificate");
        if (clientCert != null && clientCert.length > 0) {
            accessLogEntry.setSslPrincipal(clientCert[0].getSubjectX500Principal());
        }
    }

    // First matching proxy header wins; falls back to the connection's own address.
    private static String getRemoteAddress(final HttpServletRequest request) {
        return Alternative.preferred(request.getHeader(HEADER_NAME_Y_RA))
                .alternatively(() -> request.getHeader(HEADER_NAME_YAHOOREMOTEIP))
                .alternatively(() -> request.getHeader(HEADER_NAME_X_FORWARDED_FOR))
                .alternatively(() -> request.getHeader(HEADER_NAME_CLIENT_IP))
                .orElseGet(request::getRemoteAddr);
    }

    private static int getRemotePort(final HttpServletRequest request) {
        return Optional.ofNullable(request.getHeader(HEADER_NAME_Y_RP))
                .map(Integer::valueOf)
                .orElseGet(request::getRemotePort);
    }

    // Best-effort URI reconstruction from (decoded) path and query when the raw URI is malformed.
    private static void setUriFromMalformedInput(final AccessLogEntry accessLogEntry,
                                                 final String quotedPath,
                                                 final String quotedQuery) {
        try {
            final String scheme = null;
            final String authority = null;
            final String fragment = null;
            final URI uri = new URI(scheme, authority, unquote(quotedPath), unquote(quotedQuery), fragment);
            accessLogEntry.setURI(uri);
        } catch (URISyntaxException e) {
            logger.log(Level.WARNING, "Could not convert String URI to URI object", e);
        }
    }

    // URL-decodes the given string; returns the input unchanged if it is not valid URL encoding.
    private static String unquote(final String quotedQuery) {
        if (quotedQuery == null) {
            return null;
        }
        try {
            return URLDecoder.decode(quotedQuery, StandardCharsets.UTF_8.name());
        } catch (IllegalArgumentException e) {
            // Input was not URL-encoded (or is malformed) — keep the raw value.
            return quotedQuery;
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is always supported; this cannot happen in practice.
            throw new RuntimeException(e);
        }
    }
}
Fixed :)
/**
 * Jetty {@code RequestLog} callback: reuses the entry a request handler attached to
 * the request, or builds a fresh one, fills in timing/size/status, and forwards it
 * to the access log. Exceptions are swallowed and logged so Jetty is never disturbed.
 */
public void log(final Request request, final Response response) {
    try {
        final AccessLogEntry entry;
        final AccessLogEntry attachedEntry = (AccessLogEntry) request.getAttribute(
                JDiscHttpServlet.ATTRIBUTE_NAME_ACCESS_LOG_ENTRY);
        if (attachedEntry == null) {
            entry = new AccessLogEntry();
            populateAccessLogEntryFromHttpServletRequest(request, entry);
        } else {
            entry = attachedEntry;
        }

        final long requestStartedAt = request.getTimeStamp();
        final long responseFinishedAt = System.currentTimeMillis();
        entry.setTimeStamp(requestStartedAt);
        entry.setDurationBetweenRequestResponse(responseFinishedAt - requestStartedAt);
        entry.setReturnedContentSize(response.getContentCount());
        entry.setStatusCode(response.getStatus());

        accessLog.log(entry);
    } catch (Exception e) {
        logger.log(Level.SEVERE, "Failed to log access log entry: " + e.getMessage(), e);
    }
}
/**
 * Jetty {@code RequestLog} callback: builds (or reuses) an AccessLogEntry for the
 * completed request/response pair and hands it to the access log.
 * Never propagates exceptions to Jetty; failures are logged at SEVERE.
 */
public void log(final Request request, final Response response) {
    try {
        // A request handler may already have attached a (partially populated) entry.
        final AccessLogEntry accessLogEntryFromServletRequest = (AccessLogEntry) request.getAttribute(
                JDiscHttpServlet.ATTRIBUTE_NAME_ACCESS_LOG_ENTRY);
        final AccessLogEntry accessLogEntry;
        if (accessLogEntryFromServletRequest != null) {
            accessLogEntry = accessLogEntryFromServletRequest;
        } else {
            accessLogEntry = new AccessLogEntry();
            populateAccessLogEntryFromHttpServletRequest(request, accessLogEntry);
        }

        final long startTime = request.getTimeStamp();
        final long endTime = System.currentTimeMillis();
        accessLogEntry.setTimeStamp(startTime);
        accessLogEntry.setDurationBetweenRequestResponse(endTime - startTime);
        accessLogEntry.setReturnedContentSize(response.getContentCount());
        accessLogEntry.setStatusCode(response.getStatus());

        accessLog.log(accessLogEntry);
    } catch (Exception e) {
        logger.log(Level.SEVERE, "Failed to log access log entry: " + e.getMessage(), e);
    }
}
// Jetty request-log adapter that forwards per-request data to the JDisc AccessLog.
class AccessLogRequestLog extends AbstractLifeCycle implements RequestLog {

    private static final Logger logger = Logger.getLogger(AccessLogRequestLog.class.getName());

    // Proxy headers that may carry the original client address/port; checked in this order.
    private static final String HEADER_NAME_Y_RA = "y-ra";
    private static final String HEADER_NAME_Y_RP = "y-rp";
    private static final String HEADER_NAME_YAHOOREMOTEIP = "yahooremoteip";
    private static final String HEADER_NAME_X_FORWARDED_FOR = "x-forwarded-for";
    private static final String HEADER_NAME_CLIENT_IP = "client-ip";

    // Sink for completed access-log entries.
    private final AccessLog accessLog;

    public AccessLogRequestLog(final AccessLog accessLog) {
        this.accessLog = accessLog;
    }

    @Override
    /*
     * Collecting all log entry population based on extracting information from HttpServletRequest in one method
     * means that this may easily be moved to another location, e.g. if we want to populate this at instantiation
     * time rather than at logging time. We may, for example, want to set things such as http headers and ip
     * addresses up-front and make it illegal for request handlers to modify these later.
     */
    public static void populateAccessLogEntryFromHttpServletRequest(
            final HttpServletRequest request,
            final AccessLogEntry accessLogEntry) {
        final String quotedPath = request.getRequestURI();
        final String quotedQuery = request.getQueryString();
        try {
            final StringBuilder uriBuffer = new StringBuilder();
            uriBuffer.append(quotedPath);
            if (quotedQuery != null) {
                uriBuffer.append('?').append(quotedQuery);
            }
            final URI uri = new URI(uriBuffer.toString());
            accessLogEntry.setURI(uri);
        } catch (URISyntaxException e) {
            // Raw request line is not a valid URI; fall back to decoding path/query separately.
            setUriFromMalformedInput(accessLogEntry, quotedPath, quotedQuery);
        }

        // "remote" = client as reported by trusted proxy headers; "peer" = the direct connection.
        final String remoteAddress = getRemoteAddress(request);
        final int remotePort = getRemotePort(request);
        final String peerAddress = request.getRemoteAddr();
        final int peerPort = request.getRemotePort();

        accessLogEntry.setUserAgent(request.getHeader("User-Agent"));
        accessLogEntry.setHttpMethod(request.getMethod());
        accessLogEntry.setHostString(request.getHeader("Host"));
        accessLogEntry.setReferer(request.getHeader("Referer"));
        accessLogEntry.setIpV4Address(peerAddress);
        accessLogEntry.setRemoteAddress(remoteAddress);
        accessLogEntry.setRemotePort(remotePort);
        // Record the peer endpoint only where it differs from the proxy-reported remote endpoint.
        if (!Objects.equal(remoteAddress, peerAddress)) {
            accessLogEntry.setPeerAddress(peerAddress);
        }
        if (remotePort != peerPort) {
            accessLogEntry.setPeerPort(peerPort);
        }
        accessLogEntry.setHttpVersion(request.getProtocol());
        accessLogEntry.setScheme(request.getScheme());
        accessLogEntry.setLocalPort(request.getLocalPort());
        Principal principal = (Principal) request.getAttribute(ServletRequest.JDISC_REQUEST_PRINCIPAL);
        if (principal != null) {
            accessLogEntry.setUserPrincipal(principal);
        }
        X509Certificate[] clientCert = (X509Certificate[]) request.getAttribute("javax.servlet.request.X509Certificate");
        if (clientCert != null && clientCert.length > 0) {
            accessLogEntry.setSslPrincipal(clientCert[0].getSubjectX500Principal());
        }
    }

    // First matching proxy header wins; falls back to the connection's own address.
    private static String getRemoteAddress(final HttpServletRequest request) {
        return Alternative.preferred(request.getHeader(HEADER_NAME_Y_RA))
                .alternatively(() -> request.getHeader(HEADER_NAME_YAHOOREMOTEIP))
                .alternatively(() -> request.getHeader(HEADER_NAME_X_FORWARDED_FOR))
                .alternatively(() -> request.getHeader(HEADER_NAME_CLIENT_IP))
                .orElseGet(request::getRemoteAddr);
    }

    private static int getRemotePort(final HttpServletRequest request) {
        return Optional.ofNullable(request.getHeader(HEADER_NAME_Y_RP))
                .map(Integer::valueOf)
                .orElseGet(request::getRemotePort);
    }

    // Best-effort URI reconstruction from (decoded) path and query when the raw URI is malformed.
    private static void setUriFromMalformedInput(final AccessLogEntry accessLogEntry,
                                                 final String quotedPath,
                                                 final String quotedQuery) {
        try {
            final String scheme = null;
            final String authority = null;
            final String fragment = null;
            final URI uri = new URI(scheme, authority, unquote(quotedPath), unquote(quotedQuery), fragment);
            accessLogEntry.setURI(uri);
        } catch (URISyntaxException e) {
            logger.log(Level.WARNING, "Could not convert String URI to URI object", e);
        }
    }

    // URL-decodes the given string; returns the input unchanged if it is not valid URL encoding.
    private static String unquote(final String quotedQuery) {
        if (quotedQuery == null) {
            return null;
        }
        try {
            return URLDecoder.decode(quotedQuery, StandardCharsets.UTF_8.name());
        } catch (IllegalArgumentException e) {
            // Input was not URL-encoded (or is malformed) — keep the raw value.
            return quotedQuery;
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is always supported; this cannot happen in practice.
            throw new RuntimeException(e);
        }
    }
}
// Jetty request-log adapter that forwards per-request data to the JDisc AccessLog.
class AccessLogRequestLog extends AbstractLifeCycle implements RequestLog {

    private static final Logger logger = Logger.getLogger(AccessLogRequestLog.class.getName());

    // Proxy headers that may carry the original client address/port; checked in this order.
    private static final String HEADER_NAME_Y_RA = "y-ra";
    private static final String HEADER_NAME_Y_RP = "y-rp";
    private static final String HEADER_NAME_YAHOOREMOTEIP = "yahooremoteip";
    private static final String HEADER_NAME_X_FORWARDED_FOR = "x-forwarded-for";
    private static final String HEADER_NAME_CLIENT_IP = "client-ip";

    // Sink for completed access-log entries.
    private final AccessLog accessLog;

    public AccessLogRequestLog(final AccessLog accessLog) {
        this.accessLog = accessLog;
    }

    @Override
    /*
     * Collecting all log entry population based on extracting information from HttpServletRequest in one method
     * means that this may easily be moved to another location, e.g. if we want to populate this at instantiation
     * time rather than at logging time. We may, for example, want to set things such as http headers and ip
     * addresses up-front and make it illegal for request handlers to modify these later.
     */
    public static void populateAccessLogEntryFromHttpServletRequest(
            final HttpServletRequest request,
            final AccessLogEntry accessLogEntry) {
        final String quotedPath = request.getRequestURI();
        final String quotedQuery = request.getQueryString();
        try {
            final StringBuilder uriBuffer = new StringBuilder();
            uriBuffer.append(quotedPath);
            if (quotedQuery != null) {
                uriBuffer.append('?').append(quotedQuery);
            }
            final URI uri = new URI(uriBuffer.toString());
            accessLogEntry.setURI(uri);
        } catch (URISyntaxException e) {
            // Raw request line is not a valid URI; fall back to decoding path/query separately.
            setUriFromMalformedInput(accessLogEntry, quotedPath, quotedQuery);
        }

        // "remote" = client as reported by trusted proxy headers; "peer" = the direct connection.
        final String remoteAddress = getRemoteAddress(request);
        final int remotePort = getRemotePort(request);
        final String peerAddress = request.getRemoteAddr();
        final int peerPort = request.getRemotePort();

        accessLogEntry.setUserAgent(request.getHeader("User-Agent"));
        accessLogEntry.setHttpMethod(request.getMethod());
        accessLogEntry.setHostString(request.getHeader("Host"));
        accessLogEntry.setReferer(request.getHeader("Referer"));
        accessLogEntry.setIpV4Address(peerAddress);
        accessLogEntry.setRemoteAddress(remoteAddress);
        accessLogEntry.setRemotePort(remotePort);
        // Record the peer endpoint only where it differs from the proxy-reported remote endpoint.
        if (!Objects.equal(remoteAddress, peerAddress)) {
            accessLogEntry.setPeerAddress(peerAddress);
        }
        if (remotePort != peerPort) {
            accessLogEntry.setPeerPort(peerPort);
        }
        accessLogEntry.setHttpVersion(request.getProtocol());
        accessLogEntry.setScheme(request.getScheme());
        accessLogEntry.setLocalPort(request.getLocalPort());
        Principal principal = (Principal) request.getAttribute(ServletRequest.JDISC_REQUEST_PRINCIPAL);
        if (principal != null) {
            accessLogEntry.setUserPrincipal(principal);
        }
        X509Certificate[] clientCert = (X509Certificate[]) request.getAttribute("javax.servlet.request.X509Certificate");
        if (clientCert != null && clientCert.length > 0) {
            accessLogEntry.setSslPrincipal(clientCert[0].getSubjectX500Principal());
        }
    }

    // First matching proxy header wins; falls back to the connection's own address.
    private static String getRemoteAddress(final HttpServletRequest request) {
        return Alternative.preferred(request.getHeader(HEADER_NAME_Y_RA))
                .alternatively(() -> request.getHeader(HEADER_NAME_YAHOOREMOTEIP))
                .alternatively(() -> request.getHeader(HEADER_NAME_X_FORWARDED_FOR))
                .alternatively(() -> request.getHeader(HEADER_NAME_CLIENT_IP))
                .orElseGet(request::getRemoteAddr);
    }

    private static int getRemotePort(final HttpServletRequest request) {
        return Optional.ofNullable(request.getHeader(HEADER_NAME_Y_RP))
                .map(Integer::valueOf)
                .orElseGet(request::getRemotePort);
    }

    // Best-effort URI reconstruction from (decoded) path and query when the raw URI is malformed.
    private static void setUriFromMalformedInput(final AccessLogEntry accessLogEntry,
                                                 final String quotedPath,
                                                 final String quotedQuery) {
        try {
            final String scheme = null;
            final String authority = null;
            final String fragment = null;
            final URI uri = new URI(scheme, authority, unquote(quotedPath), unquote(quotedQuery), fragment);
            accessLogEntry.setURI(uri);
        } catch (URISyntaxException e) {
            logger.log(Level.WARNING, "Could not convert String URI to URI object", e);
        }
    }

    // URL-decodes the given string; returns the input unchanged if it is not valid URL encoding.
    private static String unquote(final String quotedQuery) {
        if (quotedQuery == null) {
            return null;
        }
        try {
            return URLDecoder.decode(quotedQuery, StandardCharsets.UTF_8.name());
        } catch (IllegalArgumentException e) {
            // Input was not URL-encoded (or is malformed) — keep the raw value.
            return quotedQuery;
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is always supported; this cannot happen in practice.
            throw new RuntimeException(e);
        }
    }
}
Won't this become racy now and possibly result in multiple unlocks of the same lock?
/**
 * Releases the activation lock if it is currently held.
 * NOTE(review): hasLock() followed by unlock() is a check-then-act sequence; if this
 * instance is shared between threads, two threads can both see hasLock() == true and
 * both call unlock(), double-releasing the lock. Confirm callers are single-threaded
 * or that CuratorLock tolerates a redundant unlock.
 */
public void release() {
    if (curatorLock.hasLock()) {
        curatorLock.unlock();
    }
}
curatorLock.unlock();
/**
 * Releases the activation lock, but only when it is currently held.
 * NOTE(review): the guard makes release() a no-op when the lock is not held, but the
 * hasLock()/unlock() pair is not atomic — under concurrent use this may race and
 * unlock more than once. Verify the intended threading model.
 */
public void release() {
    if (curatorLock.hasLock()) {
        curatorLock.unlock();
    }
}
// Distributed lock guarding application activation, backed by a Curator lock node
// under the given root path.
class ActivateLock {

    private static final String ACTIVATE_LOCK_NAME = "activateLock";

    private final CuratorLock curatorLock;

    public ActivateLock(Curator curator, Path rootPath) {
        this.curatorLock = new CuratorLock(curator, rootPath.append(ACTIVATE_LOCK_NAME).getAbsolute());
    }

    /**
     * Tries to take the lock within the remaining timeout budget.
     *
     * @param timeoutBudget how long we are still allowed to wait for the lock
     * @param ignoreLockError when true, acquisition failures are reported as false
     *                        instead of being rethrown
     * @return true if the lock was acquired, false on timeout (or on error when
     *         ignoreLockError is set)
     */
    public boolean acquire(TimeoutBudget timeoutBudget, boolean ignoreLockError) {
        try {
            return curatorLock.tryLock(timeoutBudget.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            if (ignoreLockError) {
                return false;
            }
            throw new RuntimeException(e);
        }
    }

    @Override
    public String toString() {
        return "ActivateLock (" + curatorLock + "), has lock: " + curatorLock.hasLock();
    }
}
// Distributed lock guarding application activation, backed by a Curator lock node.
class ActivateLock {

    // Name of the lock node created under the root path.
    private static final String ACTIVATE_LOCK_NAME = "activateLock";

    private final CuratorLock curatorLock;

    public ActivateLock(Curator curator, Path rootPath) {
        this.curatorLock = new CuratorLock(curator, rootPath.append(ACTIVATE_LOCK_NAME).getAbsolute());
    }

    /**
     * Attempts to acquire the lock within the remaining timeout budget.
     *
     * @param timeoutBudget remaining time allowed for waiting on the lock
     * @param ignoreLockError if true, failures to acquire are returned as false
     *                        rather than rethrown
     * @return true if the lock was acquired
     * @throws RuntimeException on acquisition error when ignoreLockError is false
     */
    public boolean acquire(TimeoutBudget timeoutBudget, boolean ignoreLockError) {
        try {
            return curatorLock.tryLock(timeoutBudget.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            if (!ignoreLockError) {
                throw new RuntimeException(e);
            }
            return false;
        }
    }

    @Override
    public String toString() {
        return "ActivateLock (" + curatorLock + "), has lock: " + curatorLock.hasLock();
    }
}
Is `createApplication` going to be called for each new application instance?
/**
 * Creates a new application for an existing tenant, registering it in ZMS when the
 * tenant is an Athens tenant.
 *
 * @param id the application to create; only the 'default' instance or purely numeric
 *           (PR-number) instance names are supported
 * @param token NToken authorizing the ZMS registration; required for Athens tenants
 * @return the created and stored application
 * @throws UnsupportedOperationException if the instance name is not supported
 * @throws IllegalArgumentException if the tenant does not exist, the application (or its
 *         dash/underscore twin) already exists, or no NToken is given for an Athens tenant
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().value().equals("default") || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Also reject the dash/underscore variant of this id (dashToUnderscore presumably
        // normalizes otherwise-equivalent names — confirm against its definition).
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

        if (tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            // Delete any pre-existing ZMS registration before re-adding it; failure of the
            // delete is deliberately ignored (presumably the usual case is that no
            // registration exists yet — confirm with ZMS semantics).
            try {
                zmsClient.deleteApplication(tenant.get().getAthensDomain().get(),
                                            new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            catch (ZmsException ignored) {
            }
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
if ( ! (id.instance().value().equals("default") || id.instance().value().matches("\\d+")))
/**
 * Creates a new application for an existing tenant. For Athens tenants the default
 * instance is registered in ZMS; non-default (PR-number) instances are not.
 *
 * @param id the application to create; only the 'default' instance or purely numeric
 *           (PR-number) instance names are supported
 * @param token NToken authorizing the ZMS registration; required when registering
 *              the default instance of an Athens tenant's application
 * @return the created and stored application
 * @throws UnsupportedOperationException if the instance name is not supported
 * @throws IllegalArgumentException if the tenant does not exist, the application (or its
 *         dash/underscore twin) already exists, or no NToken is given when required
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate the application name only when no instance of this application exists
        // yet for the tenant (presumably so instances of already-accepted names are not
        // rejected by newer validation rules — confirm).
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Also reject the dash/underscore variant of this id (dashToUnderscore presumably
        // normalizes otherwise-equivalent names — confirm against its definition).
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

        // ZMS registration happens only for the default instance of an Athens tenant's
        // application (non-default instances are presumably covered by the default
        // instance's registration — confirm).
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, ArtifactRepository artifactRepository, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.artifactRepository = artifactRepository; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } 
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options, Optional<NToken> token) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, token), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> 
new IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); 
application.rotation().ifPresent(applicationRotation -> { rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), 
Collections.emptyList()); return new ActivateResult(new RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, ArtifactRepository artifactRepository, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.artifactRepository = artifactRepository; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } 
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new 
IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); application.rotation().ifPresent(applicationRotation -> 
{ rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new 
RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
The lock is taken and released in one method; if the lock is not acquired, an exception is thrown, but the finally block runs anyway (attempting to release the lock). If the lock was not acquired, you will get an exception stating that you do not own the lock (the locking implementation in InterProcessMutex keeps track of which thread owns the lock). It may be better to call release() only when the lock has actually been acquired. I'll look into it.
/** Releases the activate lock if this instance currently holds it; a no-op otherwise. */
public void release() {
    // Only unlock when we actually own the lock — unlocking a lock we never
    // acquired would throw from the underlying mutex implementation.
    if ( ! curatorLock.hasLock()) return;
    curatorLock.unlock();
}
curatorLock.unlock();
/** Releases the activate lock, but only if this instance actually holds it (unlocking an unheld lock would throw). */
public void release() { if (curatorLock.hasLock()) { curatorLock.unlock(); } }
/**
 * Guards application activation with a shared Curator-backed lock, so that
 * concurrent activations against the same root path are serialized.
 */
class ActivateLock {

    private static final String ACTIVATE_LOCK_NAME = "activateLock";

    private final CuratorLock curatorLock;

    public ActivateLock(Curator curator, Path rootPath) {
        String lockPath = rootPath.append(ACTIVATE_LOCK_NAME).getAbsolute();
        this.curatorLock = new CuratorLock(curator, lockPath);
    }

    /**
     * Attempts to take the lock, waiting at most for the time remaining in the given budget.
     *
     * @param timeoutBudget how long we may keep trying before giving up
     * @param ignoreLockError if true, any failure while trying to lock yields false instead of an exception
     * @return true if the lock was acquired, false otherwise
     */
    public boolean acquire(TimeoutBudget timeoutBudget, boolean ignoreLockError) {
        try {
            long waitMillis = timeoutBudget.timeLeft().toMillis();
            return curatorLock.tryLock(waitMillis, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            if (ignoreLockError) {
                return false; // caller asked for best-effort acquisition
            }
            throw new RuntimeException(e);
        }
    }

    @Override
    public String toString() {
        return "ActivateLock (" + curatorLock + "), has lock: " + curatorLock.hasLock();
    }

}
/**
 * A distributed lock (backed by Curator) which must be held while activating an
 * application, serializing activations under the same root path.
 */
class ActivateLock {

    private static final String ACTIVATE_LOCK_NAME = "activateLock";

    private final CuratorLock curatorLock;

    public ActivateLock(Curator curator, Path rootPath) {
        this.curatorLock = new CuratorLock(curator, rootPath.append(ACTIVATE_LOCK_NAME).getAbsolute());
    }

    /**
     * Tries to acquire the lock within the time left in the given budget.
     *
     * @param timeoutBudget how long to keep trying before giving up
     * @param ignoreLockError if true, failures while trying to lock return false instead of throwing
     * @return true if the lock was acquired, false otherwise
     */
    public boolean acquire(TimeoutBudget timeoutBudget, boolean ignoreLockError) {
        try {
            return curatorLock.tryLock(timeoutBudget.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            if (!ignoreLockError) {
                throw new RuntimeException(e);
            }
            return false; // best-effort mode: swallow the error and report failure to acquire
        }
    }

    @Override
    public String toString() {
        return "ActivateLock (" + curatorLock + "), has lock: " + curatorLock.hasLock();
    }

}
Validating information about the instance should not be done here, as far as I know. Creating the application in Athenz should only be done once, not repeated for each new instance.
/**
 * Creates a new instance of an application.
 * Only the 'default' instance and instances named by a PR number are supported.
 *
 * @param id the id of the instance to create
 * @param token an NToken authorizing the creation, required when the owning tenant is an Athens tenant
 * @return the created application
 * @throws IllegalArgumentException if the tenant does not exist, the application already exists,
 *         or no NToken was provided for an Athens tenant
 * @throws UnsupportedOperationException if the instance name is not supported
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate the application name only when this is the first instance of this application;
        // additional instances reuse a name which was already validated.
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // Register the application in Athenz exactly once — when the default instance is created —
        // rather than re-registering (previously: delete-then-add) for every new instance.
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
if ( ! (id.instance().value().equals("default") || id.instance().value().matches("\\d+")))
/**
 * Creates a new instance of an application.
 * Only the 'default' instance and instances named by a PR number are supported.
 *
 * @param id the id of the instance to create
 * @param token an NToken authorizing the creation, required when the owning tenant is an Athens tenant
 * @return the created application
 * @throws IllegalArgumentException if the tenant does not exist, the application already exists,
 *         or no NToken was provided for an Athens tenant
 * @throws UnsupportedOperationException if the instance name is not supported
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate the application name only for the first instance of this application;
        // later instances reuse a name which was already validated.
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        if (get(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // Register the application in Athenz only once, when the default instance is created —
        // not again for each additional (PR) instance.
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
/**
 * Owns the applications known to this controller: CRUD, deployment, rotation and
 * DNS bookkeeping. All mutations go through a per-application curator lock
 * (see {@link #lock(ApplicationId)}).
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final RotationRepository rotationRepository;
    private final AthenzClientFactory zmsClientFactory;
    private final NameService nameService;
    private final ConfigServerClient configserverClient;
    private final RoutingGenerator routingGenerator;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          AthenzClientFactory zmsClientFactory,
                          RotationsConfig rotationsConfig,
                          NameService nameService, ConfigServerClient configserverClient,
                          ArtifactRepository artifactRepository,
                          RoutingGenerator routingGenerator, Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.artifactRepository = artifactRepository;
        this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);
        // Re-store all applications at startup so they are written in the current serialization format
        for (Application application : db.listApplications()) {
            lockIfPresent(application.id(), this::store);
        }
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return db.getApplication(id);
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return db.listApplications();
    }

    /** Returns all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return db.listApplications(new TenantId(tenant.value()));
    }

    /**
     * Set the rotations marked as 'global' either 'in' or 'out of' service.
     *
     * @return The canonical endpoint altered if any
     * @throws IOException if rotation status cannot be updated
     */
    public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
        List<String> rotations = new ArrayList<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
            rotations.add(endpoint.get());
        }
        return rotations;
    }

    /**
     * Get the endpoint status for the global endpoint of this application
     *
     * @return Map between the endpoint and the rotation status
     * @throws IOException if global rotation status cannot be determined
     */
    public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
        Map<String, EndpointStatus> result = new HashMap<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get());
            result.put(endpoint.get(), status);
        }
        return result;
    }

    /**
     * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
     * This method finds that one service endpoint and strips the URI part that
     * the routingGenerator is wrapping around the endpoint.
     *
     * @param deploymentId The deployment to retrieve global service endpoint for
     * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
     */
    Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
        Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
        Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
            try {
                URI uri = new URI(endpoint.getEndpoint());
                String serviceEndpoint = uri.getHost();
                if (serviceEndpoint == null) {
                    throw new IOException("Unexpected endpoints returned from the Routing Generator");
                }
                String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
                String hostname = endpoint.getHostname();
                if (hostname != null) {
                    if (endpoint.isGlobal()) {
                        hostToGlobalEndpoint.put(hostname, endpoint);
                    } else {
                        hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                    }
                    // A host which carries both a global and a non-global endpoint identifies the
                    // canonical service endpoint the global rotation points at
                    if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                        return Optional.of(hostToCanonicalEndpoint.get(hostname));
                    }
                }
            } catch (URISyntaxException use) {
                throw new IOException(use);
            }
        }
        return Optional.empty();
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    // NOTE(review): the method this javadoc describes (createApplication) is defined elsewhere in this file.

    /** Deploys an application. If the application does not exist it is created. */
    public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
                                            Optional<ApplicationPackage> applicationPackageFromDeployer,
                                            DeployOptions options, Optional<NToken> token) {
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = get(applicationId)
                    .map(app -> new LockedApplication(app, lock))
                    .orElseGet(() -> new LockedApplication(createApplication(applicationId, token), lock));

            // Determine which platform version to deploy
            Version version;
            if (options.deployCurrentVersion) {
                version = application.versionIn(zone, controller);
            } else if (canDeployDirectlyTo(zone, options)) {
                version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) {
                return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer);
            } else {
                version = application.deployVersionIn(zone, controller);
            }

            // Determine which application package (and version of it) to deploy
            ApplicationVersion applicationVersion;
            ApplicationPackage applicationPackage;
            Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone);
            if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) {
                if (!job.isPresent()) {
                    throw new IllegalArgumentException("Cannot determine job for zone " + zone);
                }
                applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion)
                        .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId));
                if (canDownloadArtifact(applicationVersion)) {
                    applicationPackage = new ApplicationPackage(
                            artifactRepository.getApplicationPackage(applicationId, applicationVersion.id())
                    );
                } else {
                    applicationPackage = applicationPackageFromDeployer.orElseThrow(
                            () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer"));
                }
            } else {
                applicationPackage = applicationPackageFromDeployer.orElseThrow(
                        () -> new IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId)
                );
                applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob);
            }
            validate(applicationPackage.deploymentSpec());

            // Update the change being deployed and record the job triggering, unless this is a re-deploy
            if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) {
                if (application.change().application().isPresent()) {
                    application = application.withChange(application.change().with(applicationVersion));
                }
                if (!canDeployDirectlyTo(zone, options) && job.isPresent()) {
                    JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get());
                    application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason());
                }
            }

            // Update application with the new package, and clean up state no longer referenced by it
            if (!options.deployCurrentVersion) {
                application = application.with(applicationPackage.deploymentSpec());
                application = application.with(applicationPackage.validationOverrides());
                application = deleteRemovedDeployments(application);
                application = deleteUnreferencedDeploymentJobs(application);
                store(application); // store missing information even if we fail deployment below
            }

            // Safety checks for pipeline (non-direct) deployments: refuse untested changes and downgrades
            if (!canDeployDirectlyTo(zone, options)) {
                if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) {
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested");
                }
                Deployment existingDeployment = application.deployments().get(zone);
                if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) {
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version());
                }
            }

            // Assign a global rotation if eligible, and collect its names for the prepare call
            application = withRotation(application, zone);
            Set<String> rotationNames = new HashSet<>();
            Set<String> cnames = new HashSet<>();
            application.rotation().ifPresent(applicationRotation -> {
                rotationNames.add(applicationRotation.id().asString());
                cnames.add(applicationRotation.dnsName());
                cnames.add(applicationRotation.secureDnsName());
            });

            // Carry out deployment: prepare + activate on the config server
            options = withVersion(version, options);
            ConfigServerClient.PreparedApplication preparedApplication =
                    configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent());
            preparedApplication.activate();
            application = application.withNewDeployment(zone, applicationVersion, version, clock.instant());

            store(application);
            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length);
        }
    }

    /** Makes sure the application has a global rotation, if eligible. */
    private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
        if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) {
            try (RotationLock rotationLock = rotationRepository.lock()) {
                Rotation rotation = rotationRepository.getRotation(application, rotationLock);
                application = application.with(rotation.id());
                store(application); // store assignment before registering DNS, so it survives a DNS failure
                registerRotationInDns(rotation, application.rotation().get().dnsName());
                registerRotationInDns(rotation, application.rotation().get().secureDnsName());
            }
        }
        return application;
    }

    /** Builds a non-failing ActivateResult which logs why an unexpected deployment was ignored. */
    private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = Collections.singletonList(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
        return new ActivateResult(new RevisionId(applicationPackage.map(ApplicationPackage::hash).orElse("0")),
                                  prepareResponse,
                                  applicationPackage.map(a -> a.zippedContent().length).orElse(0));
    }

    /**
     * Deactivates production deployments no longer listed in the deployment spec,
     * provided removal is allowed by a validation override.
     */
    private LockedApplication deleteRemovedDeployments(LockedApplication application) {
        List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
                .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region())))
                .collect(Collectors.toList());
        if (deploymentsToRemove.isEmpty()) return application;
        if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(deployment -> deployment.zone().region().value())
                                                                  .collect(Collectors.joining(", ")) +
                                               ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml");
        LockedApplication applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
        return applicationWithRemoval;
    }

    /** Removes production job status entries for zones no longer in the deployment spec. */
    private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
        for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) {
            Optional<ZoneId> zone = job.zone(controller.system());
            if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region))))
                continue;
            application = application.withoutDeploymentJob(job);
        }
        return application;
    }

    /**
     * Returns the existing triggering of the given type from this application,
     * or an incomplete one created in this method if none is present
     * This is needed (only) in the case where some external entity triggers a job.
     */
    private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) {
        JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
        if (status == null) return incompleteTriggeringEvent(version);
        if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version);
        return status.lastTriggered().get();
    }

    /** Creates a placeholder job run (id -1, unknown application version) for externally triggered jobs. */
    private JobStatus.JobRun incompleteTriggeringEvent(Version version) {
        return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant());
    }

    /** Returns a copy of the given options with the Vespa version set. */
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.screwdriverBuildJob,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }

    /**
     * Derives an application version from the package hash, attaching source revision
     * information when the build job provides a complete git revision.
     */
    private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) {
        if ( ! buildJob.isPresent())
            return ApplicationVersion.from(applicationPackage.hash());
        GitRevision gitRevision = buildJob.get().gitRevision;
        if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null)
            return ApplicationVersion.from(applicationPackage.hash());
        return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(),
                                                                                     gitRevision.branch.id(),
                                                                                     gitRevision.commit.id()));
    }

    /** Register a DNS name for rotation */
    private void registerRotationInDns(Rotation rotation, String dnsName) {
        try {
            Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
            RecordData rotationName = RecordData.fqdn(rotation.name());
            if (record.isPresent()) {
                // Ensure existing record points to the correct rotation
                if ( ! record.get().data().equals(rotationName)) {
                    nameService.updateRecord(record.get().id(), rotationName);
                    log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName
                             + "' -> '" + rotation.name() + "'");
                }
            } else {
                RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
                log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
                         + rotation.name() + "'");
            }
        } catch (RuntimeException e) {
            // DNS registration is best-effort; deployment proceeds even if it fails
            log.log(Level.WARNING, "Failed to register CNAME", e);
        }
    }

    /** Returns the endpoints of the deployment, or empty if obtaining them failed */
    public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) {
        try {
            List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId);
            List<URI> endPointUrls = new ArrayList<>();
            for (RoutingEndpoint endpoint : endpoints) {
                try {
                    endPointUrls.add(new URI(endpoint.getEndpoint()));
                } catch (URISyntaxException e) {
                    throw new RuntimeException("Routing generator returned illegal url's", e);
                }
            }
            return Optional.of(new InstanceEndpoints(endPointUrls));
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
                                   + Exceptions.toMessageString(e));
            return Optional.empty();
        }
    }

    /**
     * Deletes the the given application. All known instances of the applications will be deleted,
     * including PR instances.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if no instances of the application exist
     */
    public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
        // Find all instances of the application
        List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
                                                  .stream()
                                                  .map(Application::id)
                                                  .filter(id -> id.application().equals(applicationId.application()) &&
                                                                id.tenant().equals(applicationId.tenant()))
                                                  .collect(Collectors.toList());
        if (instances.isEmpty()) {
            throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
        }
        instances.forEach(id -> lockOrThrow(id, application -> {
            if ( ! application.deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
            Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get();
            if (tenant.isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
            // Only delete the application in Athenz once, together with the default instance
            if (id.instance().isDefault() && tenant.isAthensTenant()) {
                zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
                                .deleteApplication(tenant.getAthensDomain().get(),
                                                   new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            db.deleteApplication(id);
            log.info("Deleted " + application);
        }));
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        db.store(application);
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(require(applicationId), lock));
        }
    }

    /** Forwards a job completion report to the deployment trigger, ignoring reports for unknown applications. */
    public void notifyJobCompletion(JobReport report) {
        if ( ! get(report.applicationId()).isPresent()) {
            log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                   "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        deploymentTrigger.triggerFromCompletion(report);
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param hostname If non-empty, restart will only be scheduled for this host
     */
    public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
        try {
            configserverClient.restart(deploymentId, hostname);
        } catch (NoInstanceException e) {
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
        }
    }

    /** Deactivate application in the given zone */
    public void deactivate(Application application, ZoneId zone) {
        deactivate(application, zone, Optional.empty(), false);
    }

    /** Deactivate a known deployment of the given application */
    public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
        deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
    }

    private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment,
                            boolean requireThatDeploymentHasExpired) {
        if (requireThatDeploymentHasExpired && deployment.isPresent()
            && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
            return;
        lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
        try {
            configserverClient.deactivate(new DeploymentId(application.id(), zone));
        } catch (NoInstanceException ignored) {
            // ok; already gone
        }
        return application.withoutDeploymentIn(zone);
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /** Returns the id with dashes in the application name replaced by underscores (used for duplicate detection). */
    private ApplicationId dashToUnderscore(ApplicationId id) {
        return ApplicationId.from(id.tenant().value(),
                                  id.application().value().replaceAll("-", "_"),
                                  id.instance().value());
    }

    public ConfigServerClient configserverClient() { return configserverClient; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        return curator.lock(application, Duration.ofMinutes(10));
    }

    /** Returns whether a direct deployment to given zone is allowed */
    private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) {
        return ! options.screwdriverBuildJob.isPresent() ||
               options.screwdriverBuildJob.get().screwdriverId == null ||
               zone.environment().isManuallyDeployed();
    }

    /** Returns whether artifact for given version number is available in artifact repository */
    private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) {
        return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent();
    }

    /** Returns whether component has reported a version number that is availabe in artifact repository */
    private static boolean canDownloadReportedApplicationVersion(Application application) {
        return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component))
                       .flatMap(JobStatus::lastSuccess)
                       .map(JobStatus.JobRun::applicationVersion)
                       .filter(ApplicationController::canDownloadArtifact)
                       .isPresent();
    }

    /** Verify that each of the production zones listed in the deployment spec exist in this system. */
    private void validate(DeploymentSpec deploymentSpec) {
        deploymentSpec.zones().stream()
                      .filter(zone -> zone.environment() == Environment.prod)
                      .forEach(zone -> {
                          if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null))))
                              throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
                      });
    }

    public RotationRepository rotationRepository() {
        return rotationRepository;
    }

}
/**
 * Manages the applications known to this controller: creation, deployment, deletion,
 * rotation (global endpoint) assignment and deactivation.
 * All mutations go through a per-application curator lock (see {@link #lock}).
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    // Source of built application packages, by application version
    private final ArtifactRepository artifactRepository;
    // Pool of global rotations assignable to applications
    private final RotationRepository rotationRepository;
    // Factory for Athenz (ZMS) clients, used to (de)register applications in the tenant's domain
    private final AthenzClientFactory zmsClientFactory;
    // DNS service used to map rotation names to CNAME records
    private final NameService nameService;
    private final ConfigServerClient configserverClient;
    private final RoutingGenerator routingGenerator;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
                          NameService nameService, ConfigServerClient configserverClient,
                          ArtifactRepository artifactRepository, RoutingGenerator routingGenerator,
                          Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.artifactRepository = artifactRepository;
        this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);
        // Re-store all applications on startup, presumably to migrate them to the
        // current serialization format — TODO confirm
        for (Application application : db.listApplications()) {
            lockIfPresent(application.id(), this::store);
        }
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return db.getApplication(id);
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return db.listApplications();
    }

    /** Returns all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return db.listApplications(new TenantId(tenant.value()));
    }

    /**
     * Set the rotations marked as 'global' either 'in' or 'out of' service.
     *
     * @return The canonical endpoint altered if any
     * @throws IOException if rotation status cannot be updated
     */
    public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
        List<String> rotations = new ArrayList<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
            rotations.add(endpoint.get());
        }
        return rotations;
    }

    /**
     * Get the endpoint status for the global endpoint of this application
     *
     * @return Map between the endpoint and the rotation status
     * @throws IOException if global rotation status cannot be determined
     */
    public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
        Map<String, EndpointStatus> result = new HashMap<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get());
            result.put(endpoint.get(), status);
        }
        return result;
    }

    /**
     * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
     * This method finds that one service endpoint and strips the URI part that
     * the routingGenerator is wrapping around the endpoint.
     *
     * @param deploymentId The deployment to retrieve global service endpoint for
     * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
     */
    Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
        Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
        Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
            try {
                URI uri = new URI(endpoint.getEndpoint());
                String serviceEndpoint = uri.getHost();
                if (serviceEndpoint == null) {
                    throw new IOException("Unexpected endpoints returned from the Routing Generator");
                }
                // NOTE(review): replaceAll takes a regex, so the unescaped dots match any
                // character — presumably a literal suffix strip was intended; confirm
                String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
                String hostname = endpoint.getHostname();
                if (hostname != null) {
                    if (endpoint.isGlobal()) {
                        hostToGlobalEndpoint.put(hostname, endpoint);
                    } else {
                        hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                    }
                    // Return the first host which is seen both as a global and a non-global endpoint
                    if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                        return Optional.of(hostToCanonicalEndpoint.get(hostname));
                    }
                }
            } catch (URISyntaxException use) {
                throw new IOException(use);
            }
        }
        return Optional.empty();
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    /** Deploys an application. If the application does not exist it is created. */
    // TODO: Move application creation outside of this method
    public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
                                            Optional<ApplicationPackage> applicationPackageFromDeployer,
                                            DeployOptions options) {
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = get(applicationId)
                    .map(app -> new LockedApplication(app, lock))
                    .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));

            // Determine the Vespa version to deploy
            Version version;
            if (options.deployCurrentVersion) {
                version = application.versionIn(zone, controller);
            } else if (canDeployDirectlyTo(zone, options)) {
                version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) {
                // Not a direct deployment and no change in progress: this deployment is unexpected
                return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer);
            } else {
                version = application.deployVersionIn(zone, controller);
            }

            // Determine application package to use: downloaded artifact or the one given by the deployer
            ApplicationVersion applicationVersion;
            ApplicationPackage applicationPackage;
            Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone);
            if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) {
                if (!job.isPresent()) {
                    throw new IllegalArgumentException("Cannot determine job for zone " + zone);
                }
                applicationVersion = application.deployApplicationVersion(job.get(), controller,
                                                                          options.deployCurrentVersion)
                        .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId));
                if (canDownloadArtifact(applicationVersion)) {
                    applicationPackage = new ApplicationPackage(
                            artifactRepository.getApplicationPackage(applicationId, applicationVersion.id())
                    );
                } else {
                    applicationPackage = applicationPackageFromDeployer.orElseThrow(
                            () -> new IllegalArgumentException("Application package with version " +
                                                               applicationVersion.id() + " cannot be downloaded, and " +
                                                               "no package was given by deployer"));
                }
            } else {
                applicationPackage = applicationPackageFromDeployer.orElseThrow(
                        () -> new IllegalArgumentException("Application package must be given as new application " +
                                                           "version is not known for " + applicationId)
                );
                applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob);
            }
            validate(applicationPackage.deploymentSpec());

            // Record the job triggering for this deployment, unless deploying an already-known version
            if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) {
                if (application.change().application().isPresent()) {
                    application = application.withChange(application.change().with(applicationVersion));
                }
                if (!canDeployDirectlyTo(zone, options) && job.isPresent()) {
                    // Update with (potentially) missing information about what we triggered
                    JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get());
                    application = application.withJobTriggering(job.get(), application.change(), triggering.at(),
                                                                version, applicationVersion, triggering.reason());
                }
            }

            // Update application with information from the package, and store it
            if (!options.deployCurrentVersion) {
                application = application.with(applicationPackage.deploymentSpec());
                application = application.with(applicationPackage.validationOverrides());
                application = deleteRemovedDeployments(application);
                application = deleteUnreferencedDeploymentJobs(application);
                store(application); // store missing information even if we fail deployment below
            }

            // Validate the deployment unless it goes directly to a zone
            if (!canDeployDirectlyTo(zone, options)) {
                if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) {
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as " + application.change() + " is not tested");
                }
                Deployment existingDeployment = application.deployments().get(zone);
                if (zone.environment().isProduction() && existingDeployment != null &&
                    existingDeployment.version().isAfter(version)) {
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as the requested version " + version + " is older than" +
                                                       " the current version " + existingDeployment.version());
                }
            }

            // Ensure a global rotation is assigned if eligible, then prepare and activate on the config server
            application = withRotation(application, zone);
            Set<String> rotationNames = new HashSet<>();
            Set<String> cnames = new HashSet<>();
            application.rotation().ifPresent(applicationRotation -> {
                rotationNames.add(applicationRotation.id().asString());
                cnames.add(applicationRotation.dnsName());
                cnames.add(applicationRotation.secureDnsName());
            });
            options = withVersion(version, options);
            ConfigServerClient.PreparedApplication preparedApplication =
                    configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames,
                                               applicationPackage.zippedContent());
            preparedApplication.activate();
            application = application.withNewDeployment(zone, applicationVersion, version, clock.instant());
            store(application);
            return new ActivateResult(new RevisionId(applicationPackage.hash()),
                                      preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        }
    }

    /** Makes sure the application has a global rotation, if eligible. */
    private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
        if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) {
            try (RotationLock rotationLock = rotationRepository.lock()) {
                Rotation rotation = rotationRepository.getRotation(application, rotationLock);
                application = application.with(rotation.id());
                store(application); // store assignment before registering in DNS, best effort below
                registerRotationInDns(rotation, application.rotation().get().dnsName());
                registerRotationInDns(rotation, application.rotation().get().secureDnsName());
            }
        }
        return application;
    }

    /** Builds a warning-only ActivateResult for a deployment request which was not expected */
    private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone,
                                                Optional<ApplicationPackage> applicationPackage) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
                           " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = Collections.singletonList(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
        return new ActivateResult(new RevisionId(applicationPackage.map(ApplicationPackage::hash).orElse("0")),
                                  prepareResponse,
                                  applicationPackage.map(a -> a.zippedContent().length).orElse(0));
    }

    /** Deactivates any production deployments no longer listed in the application's deployment spec */
    private LockedApplication deleteRemovedDeployments(LockedApplication application) {
        List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
                .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
                                                                              Optional.of(deployment.zone().region())))
                .collect(Collectors.toList());
        if (deploymentsToRemove.isEmpty()) return application;
        // Removal must be explicitly allowed by a validation override
        if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                       .map(deployment -> deployment.zone().region().value())
                                                       .collect(Collectors.joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml");
        LockedApplication applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
        return applicationWithRemoval;
    }

    /** Removes production job statuses for zones no longer in the application's deployment spec */
    private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
        // Note: iterates the key set of the original application; reassignment below does not affect it
        for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) {
            Optional<ZoneId> zone = job.zone(controller.system());
            if ( ! job.isProduction() || (zone.isPresent() &&
                                          application.deploymentSpec().includes(zone.get().environment(),
                                                                                zone.map(ZoneId::region))))
                continue;
            application = application.withoutDeploymentJob(job);
        }
        return application;
    }

    /**
     * Returns the existing triggering of the given type from this application,
     * or an incomplete one created in this method if none is present
     * This is needed (only) in the case where some external entity triggers a job.
     */
    private JobStatus.JobRun getOrCreateTriggering(Application application, Version version,
                                                   DeploymentJobs.JobType jobType) {
        JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
        if (status == null) return incompleteTriggeringEvent(version);
        if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version);
        return status.lastTriggered().get();
    }

    /** Creates a placeholder JobRun (build number -1, unknown application version) for the given version */
    private JobStatus.JobRun incompleteTriggeringEvent(Version version) {
        return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant());
    }

    /** Returns a copy of the given options with the Vespa version set */
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.screwdriverBuildJob,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }

    /** Derives an ApplicationVersion from the package hash, plus source revision if fully known */
    private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage,
                                                            Optional<ScrewdriverBuildJob> buildJob) {
        if ( ! buildJob.isPresent())
            return ApplicationVersion.from(applicationPackage.hash());
        GitRevision gitRevision = buildJob.get().gitRevision;
        if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null)
            return ApplicationVersion.from(applicationPackage.hash());
        return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(),
                                                                                     gitRevision.branch.id(),
                                                                                     gitRevision.commit.id()));
    }

    /** Register a DNS name for rotation. Best effort: failures are logged, not propagated. */
    private void registerRotationInDns(Rotation rotation, String dnsName) {
        try {
            Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
            RecordData rotationName = RecordData.fqdn(rotation.name());
            if (record.isPresent()) {
                // Ensure the existing record points to the correct rotation
                if ( ! record.get().data().equals(rotationName)) {
                    nameService.updateRecord(record.get().id(), rotationName);
                    log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName +
                             "' -> '" + rotation.name() + "'");
                }
            } else {
                RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
                log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" +
                         rotation.name() + "'");
            }
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to register CNAME", e);
        }
    }

    /** Returns the endpoints of the deployment, or empty if obtaining them failed */
    public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) {
        try {
            List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId);
            List<URI> endPointUrls = new ArrayList<>();
            for (RoutingEndpoint endpoint : endpoints) {
                try {
                    endPointUrls.add(new URI(endpoint.getEndpoint()));
                } catch (URISyntaxException e) {
                    throw new RuntimeException("Routing generator returned illegal url's", e);
                }
            }
            return Optional.of(new InstanceEndpoints(endPointUrls));
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " +
                                   Exceptions.toMessageString(e));
            return Optional.empty();
        }
    }

    /**
     * Deletes the the given application. All known instances of the applications will be deleted,
     * including PR instances.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if no instances of the application exist
     */
    public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
        // Find all instances of the application
        List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
                .stream()
                .map(Application::id)
                .filter(id -> id.application().equals(applicationId.application()) &&
                              id.tenant().equals(applicationId.tenant()))
                .collect(Collectors.toList());
        if (instances.isEmpty()) {
            throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
        }
        instances.forEach(id -> lockOrThrow(id, application -> {
            if ( ! application.deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
            Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get();
            if (tenant.isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
            // Only delete the ZMS entry for the default instance; other instances are not registered there
            if (id.instance().isDefault() && tenant.isAthensTenant()) {
                zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
                        .deleteApplication(tenant.getAthensDomain().get(),
                                           new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            db.deleteApplication(id);
            log.info("Deleted " + application);
        }));
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        db.store(application);
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(require(applicationId), lock));
        }
    }

    /** Forwards a job completion report to the deployment trigger, ignoring reports for unknown applications */
    public void notifyJobCompletion(JobReport report) {
        if ( ! get(report.applicationId()).isPresent()) {
            log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                   "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        deploymentTrigger.triggerFromCompletion(report);
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param hostname If non-empty, restart will only be scheduled for this host
     */
    public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
        try {
            configserverClient.restart(deploymentId, hostname);
        } catch (NoInstanceException e) {
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
        }
    }

    /** Deactivate application in the given zone */
    public void deactivate(Application application, ZoneId zone) {
        deactivate(application, zone, Optional.empty(), false);
    }

    /** Deactivate a known deployment of the given application */
    public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
        deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
    }

    /** Deactivates the deployment in the given zone, then stores the updated application under lock */
    private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment,
                            boolean requireThatDeploymentHasExpired) {
        if (requireThatDeploymentHasExpired && deployment.isPresent()
            && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
            return;
        lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
        try {
            configserverClient.deactivate(new DeploymentId(application.id(), zone));
        } catch (NoInstanceException ignored) {
            // ok; already gone
        }
        return application.withoutDeploymentIn(zone);
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /** Returns the id with dashes in the application name replaced by underscores */
    private ApplicationId dashToUnderscore(ApplicationId id) {
        return ApplicationId.from(id.tenant().value(),
                                  id.application().value().replaceAll("-", "_"),
                                  id.instance().value());
    }

    public ConfigServerClient configserverClient() { return configserverClient; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        return curator.lock(application, Duration.ofMinutes(10));
    }

    /** Returns whether a direct deployment to given zone is allowed */
    private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) {
        return ! options.screwdriverBuildJob.isPresent() ||
               options.screwdriverBuildJob.get().screwdriverId == null ||
               zone.environment().isManuallyDeployed();
    }

    /** Returns whether artifact for given version number is available in artifact repository */
    private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) {
        return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent();
    }

    /** Returns whether component has reported a version number that is availabe in artifact repository */
    private static boolean canDownloadReportedApplicationVersion(Application application) {
        return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component))
                       .flatMap(JobStatus::lastSuccess)
                       .map(JobStatus.JobRun::applicationVersion)
                       .filter(ApplicationController::canDownloadArtifact)
                       .isPresent();
    }

    /** Verify that each of the production zones listed in the deployment spec exist in this system. */
    private void validate(DeploymentSpec deploymentSpec) {
        deploymentSpec.zones().stream()
                .filter(zone -> zone.environment() == Environment.prod)
                .forEach(zone -> {
                    if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
                                                                         zone.region().orElse(null))))
                        throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
                });
    }

    public RotationRepository rotationRepository() { return rotationRepository; }

}
Fix indentation
/**
 * Creates a new application for an existing tenant.
 *
 * @param id the id of the application to create; instance must be 'default' or a PR number
 * @param token NToken authorizing ZMS registration; required only for the default instance of an Athenz tenant
 * @return the created application
 * @throws IllegalArgumentException if the application already exists, the tenant does not exist,
 *                                  or no NToken is given for an Athenz tenant's default instance
 * @throws UnsupportedOperationException if the instance name is neither 'default' nor a PR number
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    // Use the isDefault() idiom rather than comparing the raw value, consistent with deleteApplication
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate the name only when no other instance of this application exists already:
        // existing applications (e.g. getting a new PR instance) must not be rejected retroactively
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        if (get(dashToUnderscore(id)).isPresent()) // dash- and underscore-named applications collide
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

        // Register in ZMS only for the default instance, mirroring deleteApplication which only
        // removes the ZMS entry for the default instance. PR instances therefore need no NToken,
        // and the previous delete-then-add workaround (with its swallowed ZmsException) is gone.
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
/**
 * Creates a new application for an existing tenant.
 *
 * @param id the id of the application to create; instance must be 'default' or a PR number
 * @param token NToken used to authorize ZMS registration; required only for the default
 *              instance of an Athenz tenant
 * @return the created application
 * @throws IllegalArgumentException if the application already exists, the tenant does not exist,
 *                                  or no NToken is given for an Athenz tenant's default instance
 * @throws UnsupportedOperationException if the instance name is neither 'default' nor a PR number
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate the application name only when no instance of this application exists yet,
        // so that already-existing applications are not rejected retroactively
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        if (get(dashToUnderscore(id)).isPresent()) // dash- and underscore-named applications collide
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // Register in ZMS only for the default instance, mirroring deleteApplication which
        // only removes the ZMS entry for the default instance
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, ArtifactRepository artifactRepository, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.artifactRepository = artifactRepository; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } 
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options, Optional<NToken> token) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, token), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> 
new IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); 
application.rotation().ifPresent(applicationRotation -> { rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), 
Collections.emptyList()); return new ActivateResult(new RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, ArtifactRepository artifactRepository, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.artifactRepository = artifactRepository; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } 
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new 
IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); application.rotation().ifPresent(applicationRotation -> 
{ rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new 
RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
Please remove the deleteApplication call to Athenz; it is a workaround for an earlier Athenz issue which should be fixed by now.
/**
 * Creates a new application for an existing tenant, registering it in Athenz when the tenant
 * is an Athenz tenant.
 *
 * @param id the id of the application to create; the instance name must be 'default' or a PR number
 * @param token the NToken authorizing the Athenz registration; required for Athenz tenants
 * @return the newly created application
 * @throws IllegalArgumentException if the tenant does not exist, the application already exists,
 *         or no NToken is provided for an Athenz tenant
 * @throws UnsupportedOperationException if the instance name is neither 'default' nor a PR number
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().value().equals("default") || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Throws if the application name is not valid
        com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        if (tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            // The delete-before-add workaround for an earlier Athenz issue (which swallowed
            // ZmsException) has been removed, as requested in review.
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
zmsClient.deleteApplication(tenant.get().getAthensDomain().get(),
/**
 * Creates a new application for an existing tenant. For the default instance of an Athenz
 * tenant the application is also registered in Athenz, which requires an NToken.
 *
 * @param id the id of the application to create
 * @param token token authorizing the Athenz registration, when one is needed
 * @return the created application
 * @throws IllegalArgumentException if the tenant is unknown, the application already exists,
 *         or a required NToken is missing
 * @throws UnsupportedOperationException if the instance name is neither 'default' nor a PR number
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! id.instance().isDefault() && ! id.instance().value().matches("\\d+"))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock applicationLock = lock(id)) {
        // Skip name validation when this tenant already has an application by this name —
        // presumably so that names accepted under earlier rules remain usable; confirm.
        boolean nameAlreadyInUse = asList(id.tenant()).stream()
                .anyMatch(existing -> existing.id().application().equals(id.application()));
        if ( ! nameAlreadyInUse)
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> owningTenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! owningTenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // Only the default instance of an Athenz tenant is registered in Athenz
        if (id.instance().isDefault() && owningTenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(owningTenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication created = new LockedApplication(new Application(id), applicationLock);
        store(created);
        log.info("Created " + created);
        return created;
    }
}
/**
 * Keeps track of all known applications: persistence, locking, and the operations that
 * create, deploy, and delete them.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    /** Provides application packages for deployment */
    private final ArtifactRepository artifactRepository;

    /** Assigns and stores global rotations for applications */
    private final RotationRepository rotationRepository;

    /** Creates clients for talking to Athenz ZMS */
    private final AthenzClientFactory zmsClientFactory;

    /** Manages DNS (CNAME) records for rotations */
    private final NameService nameService;

    /** Talks to config servers to prepare/activate/deactivate deployments */
    private final ConfigServerClient configserverClient;

    /** Resolves routing endpoints for deployments */
    private final RoutingGenerator routingGenerator;

    private final Clock clock;

    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          AthenzClientFactory zmsClientFactory,
                          RotationsConfig rotationsConfig,
                          NameService nameService, ConfigServerClient configserverClient,
                          ArtifactRepository artifactRepository,
                          RoutingGenerator routingGenerator, Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.artifactRepository = artifactRepository;
        this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);

        // Re-store every known application on construction — presumably to migrate stored
        // data to the current serialization format; TODO confirm.
        for (Application application : db.listApplications()) {
            lockIfPresent(application.id(), this::store);
        }
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return db.getApplication(id);
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options, Optional<NToken> token) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, token), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> 
new IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); 
application.rotation().ifPresent(applicationRotation -> { rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), 
Collections.emptyList()); return new ActivateResult(new RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, ArtifactRepository artifactRepository, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.artifactRepository = artifactRepository; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } 
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new 
IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); application.rotation().ifPresent(applicationRotation -> 
{ rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new 
RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
Great!
public Application createApplication(ApplicationId id, Optional<NToken> token) { if ( ! (id.instance().value().equals("default") || id.instance().value().matches("\\d+"))) throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment"); try (Lock lock = lock(id)) { com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value())); if ( ! tenant.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); if (get(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (get(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); if (tenant.get().isAthensTenant()) { if ( ! token.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided"); ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()); try { zmsClient.deleteApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } catch (ZmsException ignored) { } zmsClient.addApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } LockedApplication application = new LockedApplication(new Application(id), lock); store(application); log.info("Created " + application); return application; } }
zmsClient.deleteApplication(tenant.get().getAthensDomain().get(),
public Application createApplication(ApplicationId id, Optional<NToken> token) { if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+"))) throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment"); try (Lock lock = lock(id)) { if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application()))) com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value())); if ( ! tenant.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); if (get(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (get(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); if (id.instance().isDefault() && tenant.get().isAthensTenant()) { if ( ! token.isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided"); ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()); zmsClient.addApplication(tenant.get().getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } LockedApplication application = new LockedApplication(new Application(id), lock); store(application); log.info("Created " + application); return application; } }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, ArtifactRepository artifactRepository, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.artifactRepository = artifactRepository; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } 
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options, Optional<NToken> token) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, token), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> 
new IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); 
application.rotation().ifPresent(applicationRotation -> { rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), 
Collections.emptyList()); return new ActivateResult(new RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
/**
 * Controls the lifecycle of the applications known to this controller: lookup, deployment,
 * deletion, global-rotation management and DNS registration.
 * All mutation goes through a per-application curator lock (see {@link #lock}); callers obtain a
 * {@code LockedApplication}, modify it, and persist it with {@link #store}.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final RotationRepository rotationRepository;
    private final AthenzClientFactory zmsClientFactory;
    private final NameService nameService;
    private final ConfigServerClient configserverClient;
    private final RoutingGenerator routingGenerator;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
                          NameService nameService, ConfigServerClient configserverClient,
                          ArtifactRepository artifactRepository, RoutingGenerator routingGenerator,
                          Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.artifactRepository = artifactRepository;
        this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);
        // Re-store every application on startup — presumably to migrate stored data to the
        // current serialization format. TODO(review): confirm intent.
        for (Application application : db.listApplications()) {
            lockIfPresent(application.id(), this::store);
        }
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> get(ApplicationId id) { return db.getApplication(id); }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() { return db.listApplications(); }

    /** Returns all applications of a tenant */
    public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); }

    /**
     * Set the rotations marked as 'global' either 'in' or 'out of' service.
     *
     * @return The canonical endpoint altered if any
     * @throws IOException if rotation status cannot be updated
     */
    public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
        List<String> rotations = new ArrayList<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
            rotations.add(endpoint.get());
        }
        return rotations;
    }

    /**
     * Get the endpoint status for the global endpoint of this application
     *
     * @return Map between the endpoint and the rotation status
     * @throws IOException if global rotation status cannot be determined
     */
    public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
        Map<String, EndpointStatus> result = new HashMap<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get());
            result.put(endpoint.get(), status);
        }
        return result;
    }

    /**
     * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
     * This method finds that one service endpoint and strips the URI part that
     * the routingGenerator is wrapping around the endpoint.
     *
     * @param deploymentId The deployment to retrieve global service endpoint for
     * @return Empty if no global endpoint exists, otherwise the service endpoint ([clustername.]app.tenant.region.env)
     */
    Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
        Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
        Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
            try {
                URI uri = new URI(endpoint.getEndpoint());
                String serviceEndpoint = uri.getHost();
                if (serviceEndpoint == null) {
                    throw new IOException("Unexpected endpoints returned from the Routing Generator");
                }
                // NOTE(review): replaceAll interprets its first argument as a regex, so the
                // unescaped dots match any character — presumably intended as a literal suffix
                // strip; confirm before relying on exact behavior.
                String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
                String hostname = endpoint.getHostname();
                if (hostname != null) {
                    if (endpoint.isGlobal()) {
                        hostToGlobalEndpoint.put(hostname, endpoint);
                    } else {
                        hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                    }
                    // A host seen with both a global and a non-global endpoint identifies the
                    // canonical service endpoint we are after
                    if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                        return Optional.of(hostToCanonicalEndpoint.get(hostname));
                    }
                }
            } catch (URISyntaxException use) {
                throw new IOException(use);
            }
        }
        return Optional.empty();
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */

    /** Deploys an application. If the application does not exist it is created.
     */
    public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
                                            Optional<ApplicationPackage> applicationPackageFromDeployer,
                                            DeployOptions options) {
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = get(applicationId)
                    .map(app -> new LockedApplication(app, lock))
                    .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));

            // Determine the platform version to deploy
            Version version;
            if (options.deployCurrentVersion) {
                version = application.versionIn(zone, controller);
            } else if (canDeployDirectlyTo(zone, options)) {
                version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            } else if ( ! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) {
                // No change in progress: this deployment was not expected — log and refuse
                return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer);
            } else {
                version = application.deployVersionIn(zone, controller);
            }

            // Determine the application version and obtain the application package,
            // preferring the artifact repository over a deployer-supplied package
            ApplicationVersion applicationVersion;
            ApplicationPackage applicationPackage;
            Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone);
            if (canDownloadReportedApplicationVersion(application) && ! canDeployDirectlyTo(zone, options)) {
                if ( ! job.isPresent()) {
                    throw new IllegalArgumentException("Cannot determine job for zone " + zone);
                }
                applicationVersion = application.deployApplicationVersion(job.get(), controller,
                                                                          options.deployCurrentVersion)
                        .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " +
                                                                        applicationId));
                if (canDownloadArtifact(applicationVersion)) {
                    applicationPackage = new ApplicationPackage(
                            artifactRepository.getApplicationPackage(applicationId, applicationVersion.id())
                    );
                } else {
                    applicationPackage = applicationPackageFromDeployer.orElseThrow(
                            () -> new IllegalArgumentException("Application package with version " +
                                                               applicationVersion.id() + " cannot be downloaded, and " +
                                                               "no package was given by deployer"));
                }
            } else {
                applicationPackage = applicationPackageFromDeployer.orElseThrow(
                        () -> new IllegalArgumentException("Application package must be given as new application " +
                                                           "version is not known for " + applicationId)
                );
                applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob);
            }
            validate(applicationPackage.deploymentSpec());

            // Record what is being deployed, and the job triggering, unless redeploying current
            if ( ! options.deployCurrentVersion && ! canDownloadReportedApplicationVersion(application)) {
                if (application.change().application().isPresent()) {
                    application = application.withChange(application.change().with(applicationVersion));
                }
                if ( ! canDeployDirectlyTo(zone, options) && job.isPresent()) {
                    JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get());
                    application = application.withJobTriggering(job.get(), application.change(), triggering.at(),
                                                                version, applicationVersion, triggering.reason());
                }
            }

            // Update and store the application with the new package data before deploying
            if ( ! options.deployCurrentVersion) {
                application = application.with(applicationPackage.deploymentSpec());
                application = application.with(applicationPackage.validationOverrides());
                application = deleteRemovedDeployments(application);
                application = deleteUnreferencedDeploymentJobs(application);
                store(application); // store missing information even if we fail deployment below
            }

            // Gate deployments driven by the pipeline: untested changes and version downgrades are rejected
            if ( ! canDeployDirectlyTo(zone, options)) {
                if ( ! application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) {
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as " + application.change() + " is not tested");
                }
                Deployment existingDeployment = application.deployments().get(zone);
                if (zone.environment().isProduction() && existingDeployment != null &&
                    existingDeployment.version().isAfter(version)) {
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as the requested version " + version + " is older than" +
                                                       " the current version " + existingDeployment.version());
                }
            }

            // Assign and collect global-rotation names for the prepare call
            application = withRotation(application, zone);
            Set<String> rotationNames = new HashSet<>();
            Set<String> cnames = new HashSet<>();
            application.rotation().ifPresent(applicationRotation -> {
                rotationNames.add(applicationRotation.id().asString());
                cnames.add(applicationRotation.dnsName());
                cnames.add(applicationRotation.secureDnsName());
            });

            // Carry out the deployment through the config server, then record it
            options = withVersion(version, options);
            ConfigServerClient.PreparedApplication preparedApplication =
                    configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames,
                                               applicationPackage.zippedContent());
            preparedApplication.activate();
            application = application.withNewDeployment(zone, applicationVersion, version, clock.instant());
            store(application);
            return new ActivateResult(new RevisionId(applicationPackage.hash()),
                                      preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        }
    }

    /** Makes sure the application has a global rotation, if eligible. */
    private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
        if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) {
            try (RotationLock rotationLock = rotationRepository.lock()) {
                Rotation rotation = rotationRepository.getRotation(application, rotationLock);
                application = application.with(rotation.id());
                store(application); // store the rotation assignment before registering it in DNS
                registerRotationInDns(rotation, application.rotation().get().dnsName());
                registerRotationInDns(rotation, application.rotation().get().secureDnsName());
            }
        }
        return application;
    }

    /** Builds a refusal response for a deployment which was not expected, without deploying anything. */
    private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone,
                                                Optional<ApplicationPackage> applicationPackage) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
                           " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = Collections.singletonList(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(),
                                                                      Collections.emptyList());
        return new ActivateResult(new RevisionId(applicationPackage.map(ApplicationPackage::hash).orElse("0")),
                                  prepareResponse,
                                  applicationPackage.map(a -> a.zippedContent().length).orElse(0));
    }

    /**
     * Deactivates any production deployments no longer listed in the deployment spec,
     * provided the removal is allowed by a validation override.
     *
     * @throws IllegalArgumentException if deployments would be removed without an override
     */
    private LockedApplication deleteRemovedDeployments(LockedApplication application) {
        List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
                .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
                                                                              Optional.of(deployment.zone().region())))
                .collect(Collectors.toList());

        if (deploymentsToRemove.isEmpty()) return application;

        if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                       .map(deployment -> deployment.zone().region().value())
                                                       .collect(Collectors.joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml");

        LockedApplication applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
        return applicationWithRemoval;
    }

    /** Removes job status for production jobs whose zone is no longer in the deployment spec. */
    private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
        for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) {
            Optional<ZoneId> zone = job.zone(controller.system());
            if ( ! job.isProduction() || (zone.isPresent() &&
                                          application.deploymentSpec().includes(zone.get().environment(),
                                                                                zone.map(ZoneId::region))))
                continue;
            application = application.withoutDeploymentJob(job);
        }
        return application;
    }

    /**
     * Returns the existing triggering of the given type from this application,
     * or an incomplete one created in this method if none is present
     * This is needed (only) in the case where some external entity triggers a job.
     */
    private JobStatus.JobRun getOrCreateTriggering(Application application, Version version,
                                                   DeploymentJobs.JobType jobType) {
        JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
        if (status == null) return incompleteTriggeringEvent(version);
        if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version);
        return status.lastTriggered().get();
    }

    /** Creates a placeholder job run (id -1, empty reason) for externally triggered jobs. */
    private JobStatus.JobRun incompleteTriggeringEvent(Version version) {
        return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant());
    }

    /** Returns a copy of the given options with the given version filled in. */
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.screwdriverBuildJob,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }

    /**
     * Derives the application version from the package hash, attaching the git source revision
     * when the build job supplies a complete one.
     */
    private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage,
                                                            Optional<ScrewdriverBuildJob> buildJob) {
        if ( ! buildJob.isPresent())
            return ApplicationVersion.from(applicationPackage.hash());

        GitRevision gitRevision = buildJob.get().gitRevision;
        if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null)
            return ApplicationVersion.from(applicationPackage.hash());

        return ApplicationVersion.from(applicationPackage.hash(),
                                       new SourceRevision(gitRevision.repository.id(),
                                                          gitRevision.branch.id(),
                                                          gitRevision.commit.id()));
    }

    /** Register a DNS name for rotation */
    private void registerRotationInDns(Rotation rotation, String dnsName) {
        try {
            Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
            RecordData rotationName = RecordData.fqdn(rotation.name());
            if (record.isPresent()) {
                // Update only if the existing mapping points elsewhere
                if ( ! record.get().data().equals(rotationName)) {
                    nameService.updateRecord(record.get().id(), rotationName);
                    log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName +
                             "' -> '" + rotation.name() + "'");
                }
            } else {
                RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
                log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" +
                         rotation.name() + "'");
            }
        } catch (RuntimeException e) {
            // Best effort: DNS registration failure must not fail the deployment
            log.log(Level.WARNING, "Failed to register CNAME", e);
        }
    }

    /** Returns the endpoints of the deployment, or empty if obtaining them failed */
    public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) {
        try {
            List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId);
            List<URI> endPointUrls = new ArrayList<>();
            for (RoutingEndpoint endpoint : endpoints) {
                try {
                    endPointUrls.add(new URI(endpoint.getEndpoint()));
                } catch (URISyntaxException e) {
                    throw new RuntimeException("Routing generator returned illegal url's", e);
                }
            }
            return Optional.of(new InstanceEndpoints(endPointUrls));
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " +
                                   Exceptions.toMessageString(e));
            return Optional.empty();
        }
    }

    /**
     * Deletes the given application. All known instances of the applications will be deleted,
     * including PR instances.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if no instances of the application exist
     */
    public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
        // Find all instances of the application
        List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
                .stream()
                .map(Application::id)
                .filter(id -> id.application().equals(applicationId.application()) &&
                              id.tenant().equals(applicationId.tenant()))
                .collect(Collectors.toList());
        if (instances.isEmpty()) {
            throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
        }

        instances.forEach(id -> lockOrThrow(id, application -> {
            if ( ! application.deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");

            Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get();
            if (tenant.isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");

            // Only delete the default instance from ZMS — it is the only one registered there
            if (id.instance().isDefault() && tenant.isAthensTenant()) {
                zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
                        .deleteApplication(tenant.getAthensDomain().get(),
                                           new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            db.deleteApplication(id);
            log.info("Deleted " + application);
        }));
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        db.store(application);
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(require(applicationId), lock));
        }
    }

    /** Passes a job completion report on to the deployment trigger, ignoring reports for unknown applications. */
    public void notifyJobCompletion(JobReport report) {
        if ( ! get(report.applicationId()).isPresent()) {
            log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                   "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        deploymentTrigger.triggerFromCompletion(report);
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param hostname If non-empty, restart will only be scheduled for this host
     */
    public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
        try {
            configserverClient.restart(deploymentId, hostname);
        } catch (NoInstanceException e) {
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
        }
    }

    /** Deactivate application in the given zone */
    public void deactivate(Application application, ZoneId zone) {
        deactivate(application, zone, Optional.empty(), false);
    }

    /** Deactivate a known deployment of the given application */
    public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
        deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
    }

    /** Deactivates and stores, unless expiry is required and the deployment has not yet expired. */
    private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment,
                            boolean requireThatDeploymentHasExpired) {
        if (requireThatDeploymentHasExpired && deployment.isPresent()
            && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
            return;

        lockOrThrow(application.id(), lockedApplication ->
                store(deactivate(lockedApplication, zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
        try {
            configserverClient.deactivate(new DeploymentId(application.id(), zone));
        } catch (NoInstanceException ignored) {
            // ok; already gone
        }
        return application.withoutDeploymentIn(zone);
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /** Returns the same id with dashes in the application name replaced by underscores (clash detection). */
    private ApplicationId dashToUnderscore(ApplicationId id) {
        return ApplicationId.from(id.tenant().value(),
                                  id.application().value().replaceAll("-", "_"),
                                  id.instance().value());
    }

    public ConfigServerClient configserverClient() { return configserverClient; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        return curator.lock(application, Duration.ofMinutes(10));
    }

    /** Returns whether a direct deployment to given zone is allowed */
    private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) {
        return ! options.screwdriverBuildJob.isPresent() ||
               options.screwdriverBuildJob.get().screwdriverId == null ||
               zone.environment().isManuallyDeployed();
    }

    /** Returns whether artifact for given version number is available in artifact repository */
    private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) {
        return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent();
    }

    /** Returns whether component has reported a version number that is available in artifact repository */
    private static boolean canDownloadReportedApplicationVersion(Application application) {
        return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component))
                       .flatMap(JobStatus::lastSuccess)
                       .map(JobStatus.JobRun::applicationVersion)
                       .filter(ApplicationController::canDownloadArtifact)
                       .isPresent();
    }

    /** Verify that each of the production zones listed in the deployment spec exist in this system. */
    private void validate(DeploymentSpec deploymentSpec) {
        deploymentSpec.zones().stream()
                .filter(zone -> zone.environment() == Environment.prod)
                .forEach(zone -> {
                    if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
                                                                         zone.region().orElse(null))))
                        throw new IllegalArgumentException("Zone " + zone +
                                                           " in deployment spec was not found in this system!");
                });
    }

    public RotationRepository rotationRepository() { return rotationRepository; }

}
It would be, with these changes, yes. That's not what we want, then?
/**
 * Creates a new application for an existing tenant.
 *
 * Fixes relative to the previous version:
 * - The application name is validated only when no other instance of the same application exists,
 *   so new instances can still be added to existing applications with grandfathered names.
 * - An NToken is required, and the application registered in ZMS, only for the default instance of
 *   an Athens tenant — matching deleteApplication, which only de-registers the default instance.
 *   PR instances (numeric names) no longer require a token.
 * - The delete-then-add ZMS hack (which swallowed ZmsException and destroyed any existing
 *   registration) is removed; we simply add the application.
 *
 * @param id the id of the application to create; instance must be 'default' or a PR number
 * @param token the NToken authorizing ZMS registration; required only for the default instance
 *              of an Athens tenant
 * @throws IllegalArgumentException if the tenant does not exist, the application (or its
 *         dash/underscore twin) already exists, or a required NToken is missing
 * @throws UnsupportedOperationException if the instance name is unsupported
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate only new application names; existing names predate the current rules
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        if (get(dashToUnderscore(id)).isPresent()) // dash and underscore are interchangeable in some contexts
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

        // Only the default instance is registered in ZMS, so only it needs a token
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
if ( ! (id.instance().value().equals("default") || id.instance().value().matches("\\d+")))
/**
 * Creates a new application for an existing tenant.
 *
 * The name is validated only when no other instance of the same application exists, so that new
 * instances can be added to existing applications whose names predate the current rules. For the
 * default instance of an Athens tenant, an NToken is required and the application is registered
 * in ZMS; non-default (PR-number) instances skip ZMS registration entirely.
 *
 * @param id the id of the application to create; instance must be 'default' or a PR number
 * @param token the NToken authorizing ZMS registration; required only for the default instance
 *              of an Athens tenant
 * @throws IllegalArgumentException if the tenant does not exist, the application (or its
 *         dash/underscore twin) already exists, or a required NToken is missing
 * @throws UnsupportedOperationException if the instance name is unsupported
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate only the names of new applications; existing names are grandfathered in
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Dash and underscore forms of the same name would clash — reject the twin as well
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // Only the default instance is registered in ZMS, so only it needs a token
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, ArtifactRepository artifactRepository, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.artifactRepository = artifactRepository; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } 
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options, Optional<NToken> token) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, token), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> 
new IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); 
application.rotation().ifPresent(applicationRotation -> { rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), 
Collections.emptyList()); return new ActivateResult(new RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, ArtifactRepository artifactRepository, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.artifactRepository = artifactRepository; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } 
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new 
IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); application.rotation().ifPresent(applicationRotation -> 
{ rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new 
RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
We do at least want validation of the new instance — this is currently missing, apart from the application name being checked against a regular expression. As you can see, `createApplication` does a lot more than that. We could split the validation part out of creation and call just that, and then do a "lightweight" creation when deploying new instances.
/**
 * Creates a new application for an existing tenant, or a new (numeric) PR instance of it.
 *
 * @param id the id of the application/instance to create; the instance name must be 'default'
 *           or a PR number
 * @param token the NToken authorizing Athenz registration; required only when creating the
 *              default instance of an application under an Athenz tenant
 * @return the newly created (and stored) application
 * @throws UnsupportedOperationException if the instance name is neither 'default' nor numeric
 * @throws IllegalArgumentException if the tenant does not exist, the application already exists,
 *                                  or a required NToken is missing
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate the application name only when this tenant has no instances of it yet:
        // additional (PR) instances of an existing application reuse an already accepted name,
        // which may predate the current naming rules.
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        if (get(dashToUnderscore(id)).isPresent()) // dashes and underscores are interchangeable in application names
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // Register the application in Athenz only once, when its default instance is created,
        // mirroring deleteApplication, which deregisters only for the default instance.
        // Previously this ran for every instance — requiring an NToken for PR instances — and
        // destructively deleted and re-added the application in ZMS each time.
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
if ( ! (id.instance().value().equals("default") || id.instance().value().matches("\\d+")))
/**
 * Creates a new application for an existing tenant, or a new (numeric) PR instance of it.
 *
 * @param id the id of the application/instance to create; the instance name must be 'default'
 *           or a PR number
 * @param token the NToken authorizing Athenz registration; required only when creating the
 *              default instance of an application under an Athenz tenant
 * @return the newly created (and stored) application
 * @throws UnsupportedOperationException if the instance name is neither 'default' nor numeric
 * @throws IllegalArgumentException if the tenant does not exist, the application already exists,
 *                                  or a required NToken is missing
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate the application name only when this tenant has no instances of it yet:
        // additional (PR) instances of an existing application reuse an already accepted name,
        // which may predate the current naming rules.
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        if (get(dashToUnderscore(id)).isPresent()) // dashes and underscores are interchangeable in application names
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // Register the application in Athenz only for the default instance; PR instances
        // share the default instance's registration.
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
/**
 * Manages the applications of this controller: lookup, storage, deployment and deletion.
 * Permanent state is kept in the ControllerDb; working state shared between controllers
 * is kept in the CuratorDb.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final RotationRepository rotationRepository;
    private final AthenzClientFactory zmsClientFactory;
    private final NameService nameService;
    private final ConfigServerClient configserverClient;
    private final RoutingGenerator routingGenerator;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
                          NameService nameService, ConfigServerClient configserverClient,
                          ArtifactRepository artifactRepository, RoutingGenerator routingGenerator,
                          Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.artifactRepository = artifactRepository;
        this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);

        // NOTE(review): re-stores every application at startup — presumably to rewrite stored
        // data in the current serialization format; confirm before removing.
        for (Application application : db.listApplications()) {
            lockIfPresent(application.id(), this::store);
        }
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return db.getApplication(id);
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options, Optional<NToken> token) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, token), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> 
new IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); 
application.rotation().ifPresent(applicationRotation -> { rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), 
Collections.emptyList()); return new ActivateResult(new RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, ArtifactRepository artifactRepository, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.artifactRepository = artifactRepository; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } 
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new 
IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); application.rotation().ifPresent(applicationRotation -> 
{ rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new 
RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
Create application is intended to be called only before the first instance for a given application name is deployed; the instance id validation must therefore be performed somewhere else.
/**
 * Creates a new application for an existing tenant.
 *
 * Only the instance name 'default' and purely numeric (pull request) instance names
 * are accepted. ZMS (Athenz) registration is performed only for the default instance,
 * as all instances of an application share one ZMS application entry — registering
 * again for each PR instance previously required a delete-then-add workaround which
 * silently swallowed ZmsException.
 *
 * @param id the id of the application to create
 * @param token the NToken authorizing creation; required when the tenant is an Athenz tenant
 * @return the created application
 * @throws UnsupportedOperationException if the instance name is neither 'default' nor a PR number
 * @throws IllegalArgumentException if the tenant does not exist, the application (or its
 *         dash/underscore twin) already exists, or no NToken is given for an Athenz tenant
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate the application name only when no instance of it exists yet;
        // additional (PR) instances reuse a name that was already accepted.
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Dashes and underscores collide in some downstream systems, so reject the twin name too.
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // Register in ZMS only for the default instance: one ZMS entry covers all instances.
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
if ( ! (id.instance().value().equals("default") || id.instance().value().matches("\\d+")))
/**
 * Creates a new application for an existing tenant.
 *
 * Only the instance name 'default' and purely numeric (pull request) instance names
 * are accepted. ZMS (Athenz) registration is performed only for the default instance.
 *
 * @param id the id of the application to create
 * @param token the NToken authorizing creation; required when the tenant is an Athenz tenant
 * @return the created application
 * @throws UnsupportedOperationException if the instance name is neither 'default' nor a PR number
 * @throws IllegalArgumentException if the tenant does not exist, the application (or its
 *         dash/underscore twin) already exists, or no NToken is given for an Athenz tenant
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate the application name only when no instance of it exists yet;
        // additional (PR) instances reuse a name that was already accepted.
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Dashes and underscores collide downstream, so reject the twin name too.
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // Register in ZMS only for the default instance: one ZMS entry covers all instances.
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
/**
 * Controller-side registry and lifecycle manager for applications: creation, deployment,
 * rotation/DNS registration, and deactivation. All mutations go through a per-application
 * curator lock (see {@link #lock(ApplicationId)}).
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final RotationRepository rotationRepository;
    private final AthenzClientFactory zmsClientFactory;
    private final NameService nameService;
    private final ConfigServerClient configserverClient;
    private final RoutingGenerator routingGenerator;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          AthenzClientFactory zmsClientFactory,
                          RotationsConfig rotationsConfig,
                          NameService nameService, ConfigServerClient configserverClient,
                          ArtifactRepository artifactRepository,
                          RoutingGenerator routingGenerator, Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.artifactRepository = artifactRepository;
        this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);
        // Re-store every known application on startup — presumably to migrate stored data
        // to the current serialization format (TODO confirm).
        for (Application application : db.listApplications()) {
            lockIfPresent(application.id(), this::store);
        }
    }

    /** Returns the application with the given id, or null if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return db.getApplication(id);
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return db.listApplications();
    }

    /** Returns all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return db.listApplications(new TenantId(tenant.value()));
    }

    /**
     * Set the rotations marked as 'global' either 'in' or 'out of' service.
     *
     * @return The canonical endpoint altered if any
     * @throws IOException if rotation status cannot be updated
     */
    public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
        List<String> rotations = new ArrayList<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
            rotations.add(endpoint.get());
        }
        return rotations;
    }

    /**
     * Get the endpoint status for the global endpoint of this application
     *
     * @return Map between the endpoint and the rotation status
     * @throws IOException if global rotation status cannot be determined
     */
    public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
        Map<String, EndpointStatus> result = new HashMap<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get());
            result.put(endpoint.get(), status);
        }
        return result;
    }

    /**
     * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
     * This method finds that one service endpoint and strips the URI part that
     * the routingGenerator is wrapping around the endpoint.
     *
     * @param deploymentId The deployment to retrieve global service endpoint for
     * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
     */
    Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
        Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
        Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
            try {
                URI uri = new URI(endpoint.getEndpoint());
                String serviceEndpoint = uri.getHost();
                if (serviceEndpoint == null) {
                    throw new IOException("Unexpected endpoints returned from the Routing Generator");
                }
                // NOTE(review): replaceAll treats the argument as a regex, so the dots here
                // match any character — presumably intended as a literal suffix strip; confirm.
                String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
                String hostname = endpoint.getHostname();
                if (hostname != null) {
                    if (endpoint.isGlobal()) {
                        hostToGlobalEndpoint.put(hostname, endpoint);
                    } else {
                        hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                    }
                    // A host seen both as a global and a non-global endpoint identifies the
                    // service endpoint the global rotation maps to.
                    if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                        return Optional.of(hostToCanonicalEndpoint.get(hostname));
                    }
                }
            } catch (URISyntaxException use) {
                throw new IOException(use);
            }
        }
        return Optional.empty();
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */

    /** Deploys an application. If the application does not exist it is created. */
    public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
                                            Optional<ApplicationPackage> applicationPackageFromDeployer,
                                            DeployOptions options, Optional<NToken> token) {
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = get(applicationId)
                    .map(app -> new LockedApplication(app, lock))
                    .orElseGet(() -> new LockedApplication(createApplication(applicationId, token), lock));

            // Determine which Vespa version to deploy.
            Version version;
            if (options.deployCurrentVersion) {
                version = application.versionIn(zone, controller);
            } else if (canDeployDirectlyTo(zone, options)) {
                version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) {
                return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer);
            } else {
                version = application.deployVersionIn(zone, controller);
            }

            // Determine the application version and package: either from the artifact
            // repository (when component has reported a downloadable version) or from the deployer.
            ApplicationVersion applicationVersion;
            ApplicationPackage applicationPackage;
            Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone);
            if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) {
                if (!job.isPresent()) {
                    throw new IllegalArgumentException("Cannot determine job for zone " + zone);
                }
                applicationVersion = application.deployApplicationVersion(job.get(), controller,
                                                                          options.deployCurrentVersion)
                        .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId));
                if (canDownloadArtifact(applicationVersion)) {
                    applicationPackage = new ApplicationPackage(
                            artifactRepository.getApplicationPackage(applicationId, applicationVersion.id())
                    );
                } else {
                    applicationPackage = applicationPackageFromDeployer.orElseThrow(
                            () -> new IllegalArgumentException("Application package with version " +
                                                               applicationVersion.id() + " cannot be downloaded, and " +
                                                               "no package was given by deployer"));
                }
            } else {
                applicationPackage = applicationPackageFromDeployer.orElseThrow(
                        () -> new IllegalArgumentException("Application package must be given as new application " +
                                                           "version is not known for " + applicationId)
                );
                applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob);
            }
            validate(applicationPackage.deploymentSpec());

            // Record the triggering of this job unless this is a direct deployment.
            if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) {
                if (application.change().application().isPresent()) {
                    application = application.withChange(application.change().with(applicationVersion));
                }
                if (!canDeployDirectlyTo(zone, options) && job.isPresent()) {
                    JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get());
                    application = application.withJobTriggering(job.get(), application.change(), triggering.at(),
                                                                version, applicationVersion, triggering.reason());
                }
            }

            // Update application with information from the new package.
            if (!options.deployCurrentVersion) {
                application = application.with(applicationPackage.deploymentSpec());
                application = application.with(applicationPackage.validationOverrides());
                application = deleteRemovedDeployments(application);
                application = deleteUnreferencedDeploymentJobs(application);
                store(application); // store missing information even if we fail deployment below
            }

            // Safety checks that do not apply to direct deployments.
            if (!canDeployDirectlyTo(zone, options)) {
                if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) {
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as " + application.change() + " is not tested");
                }
                Deployment existingDeployment = application.deployments().get(zone);
                if (zone.environment().isProduction() && existingDeployment != null &&
                    existingDeployment.version().isAfter(version)) {
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as the requested version " + version + " is older than" +
                                                       " the current version " + existingDeployment.version());
                }
            }

            // Assign and collect global rotation names/cnames, then prepare and activate.
            application = withRotation(application, zone);
            Set<String> rotationNames = new HashSet<>();
            Set<String> cnames = new HashSet<>();
            application.rotation().ifPresent(applicationRotation -> {
                rotationNames.add(applicationRotation.id().asString());
                cnames.add(applicationRotation.dnsName());
                cnames.add(applicationRotation.secureDnsName());
            });
            options = withVersion(version, options);
            ConfigServerClient.PreparedApplication preparedApplication =
                    configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames,
                                               applicationPackage.zippedContent());
            preparedApplication.activate();
            application = application.withNewDeployment(zone, applicationVersion, version, clock.instant());
            store(application);
            return new ActivateResult(new RevisionId(applicationPackage.hash()),
                                      preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        }
    }

    /** Makes sure the application has a global rotation, if eligible. */
    private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
        if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) {
            try (RotationLock rotationLock = rotationRepository.lock()) {
                Rotation rotation = rotationRepository.getRotation(application, rotationLock);
                application = application.with(rotation.id());
                store(application); // store assignment before registering DNS, which is best-effort
                registerRotationInDns(rotation, application.rotation().get().dnsName());
                registerRotationInDns(rotation, application.rotation().get().secureDnsName());
            }
        }
        return application;
    }

    /** Builds a warning ActivateResult for a deployment request that was not expected. */
    private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone,
                                                Optional<ApplicationPackage> applicationPackage) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
                           " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = Collections.singletonList(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(),
                                                                      Collections.emptyList());
        return new ActivateResult(new RevisionId(applicationPackage.map(ApplicationPackage::hash)
                                                                   .orElse("0")),
                                  prepareResponse,
                                  applicationPackage.map(a -> a.zippedContent().length).orElse(0));
    }

    /**
     * Deactivates production deployments which are no longer in the deployment spec,
     * provided the removal is explicitly allowed by a validation override.
     */
    private LockedApplication deleteRemovedDeployments(LockedApplication application) {
        List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
                .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
                                                                              Optional.of(deployment.zone().region())))
                .collect(Collectors.toList());
        if (deploymentsToRemove.isEmpty()) return application;
        if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(deployment -> deployment.zone().region().value())
                                                                  .collect(Collectors.joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml");
        LockedApplication applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
        return applicationWithRemoval;
    }

    /** Removes production job status entries for zones no longer present in the deployment spec. */
    private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
        for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) {
            Optional<ZoneId> zone = job.zone(controller.system());
            if ( ! job.isProduction() || (zone.isPresent() &&
                                          application.deploymentSpec().includes(zone.get().environment(),
                                                                                zone.map(ZoneId::region))))
                continue;
            application = application.withoutDeploymentJob(job);
        }
        return application;
    }

    /**
     * Returns the existing triggering of the given type from this application,
     * or an incomplete one created in this method if none is present
     * This is needed (only) in the case where some external entity triggers a job.
     */
    private JobStatus.JobRun getOrCreateTriggering(Application application, Version version,
                                                   DeploymentJobs.JobType jobType) {
        JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
        if (status == null) return incompleteTriggeringEvent(version);
        if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version);
        return status.lastTriggered().get();
    }

    /** Creates a placeholder job run (id -1, unknown application version) for externally triggered jobs. */
    private JobStatus.JobRun incompleteTriggeringEvent(Version version) {
        return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant());
    }

    /** Returns a copy of the given options with the Vespa version pinned. */
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.screwdriverBuildJob,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }

    /** Derives the application version from the package hash and, when complete, the git revision. */
    private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage,
                                                            Optional<ScrewdriverBuildJob> buildJob) {
        if ( ! buildJob.isPresent())
            return ApplicationVersion.from(applicationPackage.hash());
        GitRevision gitRevision = buildJob.get().gitRevision;
        if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null)
            return ApplicationVersion.from(applicationPackage.hash());
        return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(),
                                                                                     gitRevision.branch.id(),
                                                                                     gitRevision.commit.id()));
    }

    /** Register a DNS name for rotation. Best-effort: failures are logged, not propagated. */
    private void registerRotationInDns(Rotation rotation, String dnsName) {
        try {
            Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
            RecordData rotationName = RecordData.fqdn(rotation.name());
            if (record.isPresent()) {
                // Update the existing CNAME only if it points somewhere else.
                if ( ! record.get().data().equals(rotationName)) {
                    nameService.updateRecord(record.get().id(), rotationName);
                    log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName
                             + "' -> '" + rotation.name() + "'");
                }
            } else {
                RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
                log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName
                         + "' -> '" + rotation.name() + "'");
            }
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to register CNAME", e);
        }
    }

    /** Returns the endpoints of the deployment, or empty if obtaining them failed */
    public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) {
        try {
            List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId);
            List<URI> endPointUrls = new ArrayList<>();
            for (RoutingEndpoint endpoint : endpoints) {
                try {
                    endPointUrls.add(new URI(endpoint.getEndpoint()));
                } catch (URISyntaxException e) {
                    throw new RuntimeException("Routing generator returned illegal url's", e);
                }
            }
            return Optional.of(new InstanceEndpoints(endPointUrls));
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
                                   + Exceptions.toMessageString(e));
            return Optional.empty();
        }
    }

    /**
     * Deletes the the given application. All known instances of the applications will be deleted,
     * including PR instances.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if no instances of the application exist
     */
    public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
        List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
                                                  .stream()
                                                  .map(Application::id)
                                                  .filter(id -> id.application().equals(applicationId.application()) &&
                                                                id.tenant().equals(applicationId.tenant()))
                                                  .collect(Collectors.toList());
        if (instances.isEmpty()) {
            throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
        }
        instances.forEach(id -> lockOrThrow(id, application -> {
            if ( ! application.deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
            Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get();
            if (tenant.isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
            // Only the default instance is registered in ZMS, so only that one is deregistered.
            if (id.instance().isDefault() && tenant.isAthensTenant()) {
                zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
                                .deleteApplication(tenant.getAthensDomain().get(),
                                                   new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            db.deleteApplication(id);
            log.info("Deleted " + application);
        }));
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        db.store(application);
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(require(applicationId), lock));
        }
    }

    /** Forwards a job completion report to the deployment trigger, ignoring unknown applications. */
    public void notifyJobCompletion(JobReport report) {
        if ( ! get(report.applicationId()).isPresent()) {
            log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                   "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        deploymentTrigger.triggerFromCompletion(report);
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param hostname If non-empty, restart will only be scheduled for this host
     */
    public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
        try {
            configserverClient.restart(deploymentId, hostname);
        } catch (NoInstanceException e) {
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
        }
    }

    /** Deactivate application in the given zone */
    public void deactivate(Application application, ZoneId zone) {
        deactivate(application, zone, Optional.empty(), false);
    }

    /** Deactivate a known deployment of the given application */
    public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
        deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
    }

    /** Deactivates the deployment in the given zone, optionally only if it has expired. */
    private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment,
                            boolean requireThatDeploymentHasExpired) {
        if (requireThatDeploymentHasExpired && deployment.isPresent()
            && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
            return;
        lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
        try {
            configserverClient.deactivate(new DeploymentId(application.id(), zone));
        } catch (NoInstanceException ignored) {
            // ok; already gone
        }
        return application.withoutDeploymentIn(zone);
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /** Returns the id with dashes in the application name replaced by underscores (collision check). */
    private ApplicationId dashToUnderscore(ApplicationId id) {
        return ApplicationId.from(id.tenant().value(),
                                  id.application().value().replaceAll("-", "_"),
                                  id.instance().value());
    }

    public ConfigServerClient configserverClient() { return configserverClient; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        return curator.lock(application, Duration.ofMinutes(10));
    }

    /** Returns whether a direct deployment to given zone is allowed */
    private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) {
        return ! options.screwdriverBuildJob.isPresent() ||
               options.screwdriverBuildJob.get().screwdriverId == null ||
               zone.environment().isManuallyDeployed();
    }

    /** Returns whether artifact for given version number is available in artifact repository */
    private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) {
        return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent();
    }

    /** Returns whether component has reported a version number that is availabe in artifact repository */
    private static boolean canDownloadReportedApplicationVersion(Application application) {
        return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component))
                       .flatMap(JobStatus::lastSuccess)
                       .map(JobStatus.JobRun::applicationVersion)
                       .filter(ApplicationController::canDownloadArtifact)
                       .isPresent();
    }

    /** Verify that each of the production zones listed in the deployment spec exist in this system. */
    private void validate(DeploymentSpec deploymentSpec) {
        deploymentSpec.zones().stream()
                      .filter(zone -> zone.environment() == Environment.prod)
                      .forEach(zone -> {
                          if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
                                                                               zone.region().orElse(null))))
                              throw new IllegalArgumentException("Zone " + zone +
                                                                 " in deployment spec was not found in this system!");
                      });
    }

    public RotationRepository rotationRepository() { return rotationRepository; }

}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, ArtifactRepository artifactRepository, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.artifactRepository = artifactRepository; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } 
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new 
IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); application.rotation().ifPresent(applicationRotation -> 
{ rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new 
RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
Aha. Well, when it is used that way, the instance is always "default" (it comes from the create-handler). The underlying problem is that we don't have a createApplication method separate from createInstance — the latter is what this createApplication code _really_ is — and that is why the validation is needed here. This will all get fixed _some day_, let's hope :)
/**
 * Creates a new application (or a new, purely numeric "PR" instance of an existing
 * application) for an existing tenant.
 *
 * @param id the id of the application/instance to create; only the 'default' instance
 *           and instance names which are just a PR number are supported
 * @param token the NToken which is required when the owning tenant is an Athens tenant
 *              and the 'default' instance is being created; may be empty otherwise
 * @return the stored, newly created application
 * @throws UnsupportedOperationException if the instance name is neither 'default' nor numeric
 * @throws IllegalArgumentException if the tenant does not exist, the application already
 *         exists (also under its dash/underscore twin name), or a required NToken is missing
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate the application name only when no instance of this application exists yet:
        // existing applications may carry names which no longer validate, and creating new
        // (PR) instances of those must still be possible.
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Reject names which collide after '-' -> '_' normalization — see dashToUnderscore
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

        // Only the 'default' instance is registered in ZMS; PR instances are not
        // (mirrors the id.instance().isDefault() guard in deleteApplication).
        // The previous delete-before-add hack (with a swallowed ZmsException) is removed:
        // a plain addApplication suffices for a genuinely new application.
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
if ( ! (id.instance().value().equals("default") || id.instance().value().matches("\\d+")))
/**
 * Creates a new application (or a new, purely numeric "PR" instance of an existing
 * application) for an existing tenant.
 *
 * @param id the id of the application/instance to create; only the 'default' instance
 *           and instance names which are just a PR number are supported
 * @param token the NToken which is required when the owning tenant is an Athens tenant
 *              and the 'default' instance is being created; may be empty otherwise
 * @return the stored, newly created application
 * @throws UnsupportedOperationException if the instance name is neither 'default' nor numeric
 * @throws IllegalArgumentException if the tenant does not exist, the application already
 *         exists (also under its dash/underscore twin name), or a required NToken is missing
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate the application name only when no instance of this application exists yet:
        // existing applications may carry names which no longer validate, and creating new
        // (PR) instances of those must still be possible.
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(new TenantId(id.tenant().value()));
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Presumably '-' and '_' are conflated somewhere downstream (see dashToUnderscore),
        // so names colliding under that normalization are rejected too — TODO confirm.
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // Only the 'default' instance is registered in ZMS; PR instances are not
        // (mirrors the id.instance().isDefault() guard in deleteApplication).
        if (id.instance().isDefault() && tenant.get().isAthensTenant()) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(tenant.get().getAthensDomain().get(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
/**
 * A singleton owned by the Controller which contains the methods and state for controlling applications.
 *
 * Thread-safety: mutating operations acquire the per-application curator lock (see {@link #lock}).
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For permanent storage */
    private final ControllerDb db;

    /** For working memory storage and sharing between controllers */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final RotationRepository rotationRepository;
    private final AthenzClientFactory zmsClientFactory;
    private final NameService nameService;
    private final ConfigServerClient configserverClient;
    private final RoutingGenerator routingGenerator;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;

    ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
                          AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
                          NameService nameService, ConfigServerClient configserverClient,
                          ArtifactRepository artifactRepository, RoutingGenerator routingGenerator,
                          Clock clock) {
        this.controller = controller;
        this.db = db;
        this.curator = curator;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configserverClient = configserverClient;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.artifactRepository = artifactRepository;
        this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock);

        // Rewrite all applications on startup to migrate any stored data to the current format.
        for (Application application : db.listApplications()) {
            lockIfPresent(application.id(), this::store);
        }
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return db.getApplication(id);
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application require(ApplicationId id) {
        return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return db.listApplications();
    }

    /** Returns all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return db.listApplications(new TenantId(tenant.value()));
    }

    /**
     * Set the rotations marked as 'global' either 'in' or 'out of' service.
     *
     * @return The canonical endpoint altered if any
     * @throws IOException if rotation status cannot be updated
     */
    public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
        List<String> rotations = new ArrayList<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
            rotations.add(endpoint.get());
        }
        return rotations;
    }

    /**
     * Get the endpoint status for the global endpoint of this application
     *
     * @return Map between the endpoint and the rotation status
     * @throws IOException if global rotation status cannot be determined
     */
    public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
        Map<String, EndpointStatus> result = new HashMap<>();
        Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
        if (endpoint.isPresent()) {
            EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get());
            result.put(endpoint.get(), status);
        }
        return result;
    }

    /**
     * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
     * This method finds that one service endpoint and strips the URI part that
     * the routingGenerator is wrapping around the endpoint.
     *
     * @param deploymentId The deployment to retrieve global service endpoint for
     * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
     * @throws IOException if an endpoint is malformed or has no host part
     */
    Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
        Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
        Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
            try {
                URI uri = new URI(endpoint.getEndpoint());
                String serviceEndpoint = uri.getHost();
                if (serviceEndpoint == null) {
                    throw new IOException("Unexpected endpoints returned from the Routing Generator");
                }
                // Strip the wrapping domain suffix literally. Previously this used replaceAll,
                // whose first argument is a regex: the unescaped dots matched any character.
                String canonicalEndpoint = serviceEndpoint.replace(".vespa.yahooapis.com", "");
                String hostname = endpoint.getHostname();
                if (hostname != null) {
                    if (endpoint.isGlobal()) {
                        hostToGlobalEndpoint.put(hostname, endpoint);
                    } else {
                        hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                    }
                    // A host seen both as a global and a non-global endpoint identifies the one service endpoint.
                    if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                        return Optional.of(hostToCanonicalEndpoint.get(hostname));
                    }
                }
            } catch (URISyntaxException use) {
                throw new IOException(use);
            }
        }
        return Optional.empty();
    }

    /**
     * Deploys an application package to a zone. If the application does not exist it is created.
     *
     * @param applicationId the application to deploy
     * @param zone the zone to deploy to
     * @param applicationPackageFromDeployer the package supplied by the deployer, if any; may be
     *        substituted by a package downloaded from the artifact repository
     * @param options deployment options (version, direct-deploy flags etc.)
     * @param token NToken used if the application must first be created for an Athens tenant
     * @return the result of activating the prepared application
     */
    public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
                                            Optional<ApplicationPackage> applicationPackageFromDeployer,
                                            DeployOptions options, Optional<NToken> token) {
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = get(applicationId)
                    .map(app -> new LockedApplication(app, lock))
                    .orElseGet(() -> new LockedApplication(createApplication(applicationId, token), lock));

            // Determine the platform version to deploy.
            Version version;
            if (options.deployCurrentVersion) {
                version = application.versionIn(zone, controller);
            } else if (canDeployDirectlyTo(zone, options)) {
                version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            } else if ( ! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) {
                // No ongoing change and not a manual zone: this deployment was not expected.
                return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer);
            } else {
                version = application.deployVersionIn(zone, controller);
            }

            // Determine the application version and package: prefer the artifact repository when
            // the component job has reported a downloadable version, otherwise use the deployer's package.
            ApplicationVersion applicationVersion;
            ApplicationPackage applicationPackage;
            Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone);
            if (canDownloadReportedApplicationVersion(application) && ! canDeployDirectlyTo(zone, options)) {
                if ( ! job.isPresent()) {
                    throw new IllegalArgumentException("Cannot determine job for zone " + zone);
                }
                applicationVersion = application.deployApplicationVersion(job.get(), controller,
                                                                          options.deployCurrentVersion)
                        .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId));
                if (canDownloadArtifact(applicationVersion)) {
                    applicationPackage = new ApplicationPackage(
                            artifactRepository.getApplicationPackage(applicationId, applicationVersion.id())
                    );
                } else {
                    applicationPackage = applicationPackageFromDeployer.orElseThrow(
                            () -> new IllegalArgumentException("Application package with version " +
                                                               applicationVersion.id() + " cannot be downloaded, and " +
                                                               "no package was given by deployer"));
                }
            } else {
                applicationPackage = applicationPackageFromDeployer.orElseThrow(
                        () -> new IllegalArgumentException("Application package must be given as new application " +
                                                           "version is not known for " + applicationId)
                );
                applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob);
            }
            validate(applicationPackage.deploymentSpec());

            // Record the triggering of this job unless we are redeploying the current version.
            if ( ! options.deployCurrentVersion && ! canDownloadReportedApplicationVersion(application)) {
                if (application.change().application().isPresent()) {
                    application = application.withChange(application.change().with(applicationVersion));
                }
                if ( ! canDeployDirectlyTo(zone, options) && job.isPresent()) {
                    // Triggering information is used to store the proper application in this destination.
                    JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get());
                    application = application.withJobTriggering(job.get(), application.change(), triggering.at(),
                                                                version, applicationVersion, triggering.reason());
                }
            }

            // Update application with the new package information; delete obsolete deployments and jobs.
            if ( ! options.deployCurrentVersion) {
                application = application.with(applicationPackage.deploymentSpec());
                application = application.with(applicationPackage.validationOverrides());
                application = deleteRemovedDeployments(application);
                application = deleteUnreferencedDeploymentJobs(application);
                store(application); // store missing information even if we fail deployment below
            }

            // Pipeline deployments must be tested and must not roll production backwards.
            if ( ! canDeployDirectlyTo(zone, options)) {
                if ( ! application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) {
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as " + application.change() + " is not tested");
                }
                Deployment existingDeployment = application.deployments().get(zone);
                if (zone.environment().isProduction() && existingDeployment != null &&
                    existingDeployment.version().isAfter(version)) {
                    throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
                                                       " as the requested version " + version + " is older than" +
                                                       " the current version " + existingDeployment.version());
                }
            }

            // Assign and collect rotation names for prepare.
            application = withRotation(application, zone);
            Set<String> rotationNames = new HashSet<>();
            Set<String> cnames = new HashSet<>();
            application.rotation().ifPresent(applicationRotation -> {
                rotationNames.add(applicationRotation.id().asString());
                cnames.add(applicationRotation.dnsName());
                cnames.add(applicationRotation.secureDnsName());
            });
            options = withVersion(version, options);

            // Carry out the deployment on the config server.
            ConfigServerClient.PreparedApplication preparedApplication =
                    configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames,
                                               applicationPackage.zippedContent());
            preparedApplication.activate();
            application = application.withNewDeployment(zone, applicationVersion, version, clock.instant());

            store(application);

            return new ActivateResult(new RevisionId(applicationPackage.hash()),
                                      preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        }
    }

    /** Makes sure the application has a global rotation, if eligible. */
    private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
        if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) {
            try (RotationLock rotationLock = rotationRepository.lock()) {
                Rotation rotation = rotationRepository.getRotation(application, rotationLock);
                application = application.with(rotation.id());
                store(application); // store assignment before registering in DNS, which is best effort
                registerRotationInDns(rotation, application.rotation().get().dnsName());
                registerRotationInDns(rotation, application.rotation().get().secureDnsName());
            }
        }
        return application;
    }

    /** Builds a warning-only ActivateResult for a deployment request we choose to ignore. */
    private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone,
                                                Optional<ApplicationPackage> applicationPackage) {
        Log logEntry = new Log();
        logEntry.level = "WARNING";
        logEntry.time = clock.instant().toEpochMilli();
        logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
                           " as a deployment is not currently expected";
        PrepareResponse prepareResponse = new PrepareResponse();
        prepareResponse.log = Collections.singletonList(logEntry);
        prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(),
                                                                      Collections.emptyList());
        return new ActivateResult(new RevisionId(applicationPackage.map(ApplicationPackage::hash)
                                                                   .orElse("0")),
                                  prepareResponse,
                                  applicationPackage.map(a -> a.zippedContent().length).orElse(0));
    }

    /**
     * Deactivates production deployments in zones no longer present in the deployment spec,
     * provided the removal is explicitly allowed by a validation override.
     */
    private LockedApplication deleteRemovedDeployments(LockedApplication application) {
        List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
                .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
                                                                              Optional.of(deployment.zone().region())))
                .collect(Collectors.toList());

        if (deploymentsToRemove.isEmpty()) return application;

        if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(deployment -> deployment.zone().region().value())
                                                                  .collect(Collectors.joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml");

        LockedApplication applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
        return applicationWithRemoval;
    }

    /** Removes production job statuses whose zone is no longer in the deployment spec. */
    private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
        for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) {
            Optional<ZoneId> zone = job.zone(controller.system());
            // Keep non-production jobs, and production jobs whose zone is still referenced.
            if ( ! job.isProduction()
                 || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(),
                                                                              zone.map(ZoneId::region))))
                continue;
            application = application.withoutDeploymentJob(job);
        }
        return application;
    }

    /**
     * Returns the existing triggering of the given type from this application,
     * or an incomplete one created in this method if none is present.
     * This is needed (only) in the case where some external entity triggers a job.
     */
    private JobStatus.JobRun getOrCreateTriggering(Application application, Version version,
                                                   DeploymentJobs.JobType jobType) {
        JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
        if (status == null) return incompleteTriggeringEvent(version);
        if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version);
        return status.lastTriggered().get();
    }

    /** A placeholder triggering event (id -1) for jobs triggered outside our knowledge. */
    private JobStatus.JobRun incompleteTriggeringEvent(Version version) {
        return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant());
    }

    /** Returns a copy of the given options with the resolved platform version filled in. */
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.screwdriverBuildJob,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }

    /**
     * Derives the application version from the package hash, attaching the git source revision
     * when the build job provides a complete one.
     */
    private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage,
                                                            Optional<ScrewdriverBuildJob> buildJob) {
        if ( ! buildJob.isPresent())
            return ApplicationVersion.from(applicationPackage.hash());

        GitRevision gitRevision = buildJob.get().gitRevision;
        if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null)
            return ApplicationVersion.from(applicationPackage.hash());

        return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(),
                                                                                     gitRevision.branch.id(),
                                                                                     gitRevision.commit.id()));
    }

    /** Register a DNS name for rotation. Best effort: failures are logged, not thrown. */
    private void registerRotationInDns(Rotation rotation, String dnsName) {
        try {
            Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
            RecordData rotationName = RecordData.fqdn(rotation.name());
            if (record.isPresent()) {
                // Ensure that the existing record points to the correct rotation
                if ( ! record.get().data().equals(rotationName)) {
                    nameService.updateRecord(record.get().id(), rotationName);
                    log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName +
                             "' -> '" + rotation.name() + "'");
                }
            } else {
                RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
                log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" +
                         rotation.name() + "'");
            }
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to register CNAME", e);
        }
    }

    /** Returns the endpoints of the deployment, or empty if obtaining them failed */
    public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) {
        try {
            List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId);
            List<URI> endPointUrls = new ArrayList<>();
            for (RoutingEndpoint endpoint : endpoints) {
                try {
                    endPointUrls.add(new URI(endpoint.getEndpoint()));
                } catch (URISyntaxException e) {
                    throw new RuntimeException("Routing generator returned illegal url's", e);
                }
            }
            return Optional.of(new InstanceEndpoints(endPointUrls));
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " +
                                   Exceptions.toMessageString(e));
            return Optional.empty();
        }
    }

    /**
     * Deletes the the given application. All known instances of the applications will be deleted,
     * including PR instances.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if no instances of the application exist
     */
    public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
        List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
                                                  .stream()
                                                  .map(Application::id)
                                                  .filter(id -> id.application().equals(applicationId.application()) &&
                                                                id.tenant().equals(applicationId.tenant()))
                                                  .collect(Collectors.toList());
        if (instances.isEmpty()) {
            throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
        }

        instances.forEach(id -> lockOrThrow(id, application -> {
            if ( ! application.deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");

            Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get();
            if (tenant.isAthensTenant() && ! token.isPresent())
                throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");

            // Only delete the default instance from ZMS; that is the only instance registered there.
            if (id.instance().isDefault() && tenant.isAthensTenant()) {
                zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
                                .deleteApplication(tenant.getAthensDomain().get(),
                                                   new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            db.deleteApplication(id);
            log.info("Deleted " + application);
        }));
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        db.store(application);
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(require(applicationId), lock));
        }
    }

    /** Forwards a job completion report to the deployment trigger, ignoring unknown applications. */
    public void notifyJobCompletion(JobReport report) {
        if ( ! get(report.applicationId()).isPresent()) {
            log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                   "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        deploymentTrigger.triggerFromCompletion(report);
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param hostname If non-empty, restart will only be scheduled for this host
     * @throws IllegalArgumentException if the deployment does not exist
     */
    public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
        try {
            configserverClient.restart(deploymentId, hostname);
        } catch (NoInstanceException e) {
            // Preserve the cause so the underlying failure is not lost (previously dropped).
            throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment", e);
        }
    }

    /** Deactivate application in the given zone */
    public void deactivate(Application application, ZoneId zone) {
        deactivate(application, zone, Optional.empty(), false);
    }

    /** Deactivate a known deployment of the given application */
    public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
        deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
    }

    private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment,
                            boolean requireThatDeploymentHasExpired) {
        if (requireThatDeploymentHasExpired && deployment.isPresent()
            && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
            return;

        lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
        try {
            configserverClient.deactivate(new DeploymentId(application.id(), zone));
        } catch (NoInstanceException ignored) {
            // The deployment is already gone on the config server; removing our record of it is all that remains.
        }
        return application.withoutDeploymentIn(zone);
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /** Returns the id with dashes in the application name replaced by underscores (a known alias clash). */
    private ApplicationId dashToUnderscore(ApplicationId id) {
        return ApplicationId.from(id.tenant().value(),
                                  id.application().value().replace("-", "_"),
                                  id.instance().value());
    }

    public ConfigServerClient configserverClient() { return configserverClient; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        return curator.lock(application, Duration.ofMinutes(10));
    }

    /** Returns whether a direct deployment to given zone is allowed */
    private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) {
        return ! options.screwdriverBuildJob.isPresent() ||
               options.screwdriverBuildJob.get().screwdriverId == null ||
               zone.environment().isManuallyDeployed();
    }

    /** Returns whether artifact for given version number is available in artifact repository */
    private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) {
        return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent();
    }

    /** Returns whether component has reported a version number that is availabe in artifact repository */
    private static boolean canDownloadReportedApplicationVersion(Application application) {
        return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component))
                       .flatMap(JobStatus::lastSuccess)
                       .map(JobStatus.JobRun::applicationVersion)
                       .filter(ApplicationController::canDownloadArtifact)
                       .isPresent();
    }

    /** Verify that each of the production zones listed in the deployment spec exist in this system. */
    private void validate(DeploymentSpec deploymentSpec) {
        deploymentSpec.zones().stream()
                      .filter(zone -> zone.environment() == Environment.prod)
                      .forEach(zone -> {
                          if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
                                                                               zone.region().orElse(null))))
                              throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
                      });
    }

    public RotationRepository rotationRepository() {
        return rotationRepository;
    }

}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and sharing between controllers */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final RotationRepository rotationRepository; private final AthenzClientFactory zmsClientFactory; private final NameService nameService; private final ConfigServerClient configserverClient; private final RoutingGenerator routingGenerator; private final Clock clock; private final DeploymentTrigger deploymentTrigger; ApplicationController(Controller controller, ControllerDb db, CuratorDb curator, AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig, NameService nameService, ConfigServerClient configserverClient, ArtifactRepository artifactRepository, RoutingGenerator routingGenerator, Clock clock) { this.controller = controller; this.db = db; this.curator = curator; this.zmsClientFactory = zmsClientFactory; this.nameService = nameService; this.configserverClient = configserverClient; this.routingGenerator = routingGenerator; this.clock = clock; this.artifactRepository = artifactRepository; this.rotationRepository = new RotationRepository(rotationsConfig, this, curator); this.deploymentTrigger = new DeploymentTrigger(controller, curator, clock); for (Application application : db.listApplications()) { lockIfPresent(application.id(), this::store); } } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> get(ApplicationId id) { return db.getApplication(id); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application require(ApplicationId id) { return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } 
/** Returns a snapshot of all applications */ public List<Application> asList() { return db.listApplications(); } /** Returns all applications of a tenant */ public List<Application> asList(TenantName tenant) { return db.listApplications(new TenantId(tenant.value())); } /** * Set the rotations marked as 'global' either 'in' or 'out of' service. * * @return The canonical endpoint altered if any * @throws IOException if rotation status cannot be updated */ public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException { List<String> rotations = new ArrayList<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { configserverClient.setGlobalRotationStatus(deploymentId, endpoint.get(), status); rotations.add(endpoint.get()); } return rotations; } /** * Get the endpoint status for the global endpoint of this application * * @return Map between the endpoint and the rotation status * @throws IOException if global rotation status cannot be determined */ public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException { Map<String, EndpointStatus> result = new HashMap<>(); Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId); if (endpoint.isPresent()) { EndpointStatus status = configserverClient.getGlobalRotationStatus(deploymentId, endpoint.get()); result.put(endpoint.get(), status); } return result; } /** * Global rotations (plural as we can have aliases) map to exactly one service endpoint. * This method finds that one service endpoint and strips the URI part that * the routingGenerator is wrapping around the endpoint. 
* * @param deploymentId The deployment to retrieve global service endpoint for * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env) */ Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException { Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>(); Map<String, String> hostToCanonicalEndpoint = new HashMap<>(); for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) { try { URI uri = new URI(endpoint.getEndpoint()); String serviceEndpoint = uri.getHost(); if (serviceEndpoint == null) { throw new IOException("Unexpected endpoints returned from the Routing Generator"); } String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", ""); String hostname = endpoint.getHostname(); if (hostname != null) { if (endpoint.isGlobal()) { hostToGlobalEndpoint.put(hostname, endpoint); } else { hostToCanonicalEndpoint.put(hostname, canonicalEndpoint); } if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) { return Optional.of(hostToCanonicalEndpoint.get(hostname)); } } } catch (URISyntaxException use) { throw new IOException(use); } } return Optional.empty(); } /** * Creates a new application for an existing tenant. * * @throws IllegalArgumentException if the application already exists */ /** Deploys an application. If the application does not exist it is created. 
*/ public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackageFromDeployer, DeployOptions options) { try (Lock lock = lock(applicationId)) { LockedApplication application = get(applicationId) .map(app -> new LockedApplication(app, lock)) .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock)); Version version; if (options.deployCurrentVersion) { version = application.versionIn(zone, controller); } else if (canDeployDirectlyTo(zone, options)) { version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion()); } else if (! application.change().isPresent() && ! zone.environment().isManuallyDeployed()) { return unexpectedDeployment(applicationId, zone, applicationPackageFromDeployer); } else { version = application.deployVersionIn(zone, controller); } ApplicationVersion applicationVersion; ApplicationPackage applicationPackage; Optional<DeploymentJobs.JobType> job = DeploymentJobs.JobType.from(controller.system(), zone); if (canDownloadReportedApplicationVersion(application) && !canDeployDirectlyTo(zone, options)) { if (!job.isPresent()) { throw new IllegalArgumentException("Cannot determine job for zone " + zone); } applicationVersion = application.deployApplicationVersion(job.get(), controller, options.deployCurrentVersion) .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version for " + applicationId)); if (canDownloadArtifact(applicationVersion)) { applicationPackage = new ApplicationPackage( artifactRepository.getApplicationPackage(applicationId, applicationVersion.id()) ); } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new IllegalArgumentException("Application package with version " + applicationVersion.id() + " cannot be downloaded, and " + "no package was given by deployer")); } } else { applicationPackage = applicationPackageFromDeployer.orElseThrow( () -> new 
IllegalArgumentException("Application package must be given as new application " + "version is not known for " + applicationId) ); applicationVersion = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob); } validate(applicationPackage.deploymentSpec()); if (!options.deployCurrentVersion && !canDownloadReportedApplicationVersion(application)) { if (application.change().application().isPresent()) { application = application.withChange(application.change().with(applicationVersion)); } if (!canDeployDirectlyTo(zone, options) && job.isPresent()) { JobStatus.JobRun triggering = getOrCreateTriggering(application, version, job.get()); application = application.withJobTriggering(job.get(), application.change(), triggering.at(), version, applicationVersion, triggering.reason()); } } if (!options.deployCurrentVersion) { application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); application = deleteRemovedDeployments(application); application = deleteUnreferencedDeploymentJobs(application); store(application); } if (!canDeployDirectlyTo(zone, options)) { if (!application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as " + application.change() + " is not tested"); } Deployment existingDeployment = application.deployments().get(zone); if (zone.environment().isProduction() && existingDeployment != null && existingDeployment.version().isAfter(version)) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + " as the requested version " + version + " is older than" + " the current version " + existingDeployment.version()); } } application = withRotation(application, zone); Set<String> rotationNames = new HashSet<>(); Set<String> cnames = new HashSet<>(); application.rotation().ifPresent(applicationRotation -> 
{ rotationNames.add(applicationRotation.id().asString()); cnames.add(applicationRotation.dnsName()); cnames.add(applicationRotation.secureDnsName()); }); options = withVersion(version, options); ConfigServerClient.PreparedApplication preparedApplication = configserverClient.prepare(new DeploymentId(applicationId, zone), options, cnames, rotationNames, applicationPackage.zippedContent()); preparedApplication.activate(); application = application.withNewDeployment(zone, applicationVersion, version, clock.instant()); store(application); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } } /** Makes sure the application has a global rotation, if eligible. */ private LockedApplication withRotation(LockedApplication application, ZoneId zone) { if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) { try (RotationLock rotationLock = rotationRepository.lock()) { Rotation rotation = rotationRepository.getRotation(application, rotationLock); application = application.with(rotation.id()); store(application); registerRotationInDns(rotation, application.rotation().get().dnsName()); registerRotationInDns(rotation, application.rotation().get().secureDnsName()); } } return application; } private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone, Optional<ApplicationPackage> applicationPackage) { Log logEntry = new Log(); logEntry.level = "WARNING"; logEntry.time = clock.instant().toEpochMilli(); logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone + " as a deployment is not currently expected"; PrepareResponse prepareResponse = new PrepareResponse(); prepareResponse.log = Collections.singletonList(logEntry); prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList()); return new ActivateResult(new 
RevisionId(applicationPackage.map(ApplicationPackage::hash) .orElse("0")), prepareResponse, applicationPackage.map(a -> a.zippedContent().length).orElse(0)); } private LockedApplication deleteRemovedDeployments(LockedApplication application) { List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream() .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(), Optional.of(deployment.zone().region()))) .collect(Collectors.toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application + " is deployed in " + deploymentsToRemove.stream() .map(deployment -> deployment.zone().region().value()) .collect(Collectors.joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml"); LockedApplication applicationWithRemoval = application; for (Deployment deployment : deploymentsToRemove) applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone()); return applicationWithRemoval; } private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) { for (DeploymentJobs.JobType job : application.deploymentJobs().jobStatus().keySet()) { Optional<ZoneId> zone = job.zone(controller.system()); if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)))) continue; application = application.withoutDeploymentJob(job); } return application; } /** * Returns the existing triggering of the given type from this application, * or an incomplete one created in this method if none is present * This is needed (only) in the case where some external entity triggers a job. 
*/ private JobStatus.JobRun getOrCreateTriggering(Application application, Version version, DeploymentJobs.JobType jobType) { JobStatus status = application.deploymentJobs().jobStatus().get(jobType); if (status == null) return incompleteTriggeringEvent(version); if ( ! status.lastTriggered().isPresent()) return incompleteTriggeringEvent(version); return status.lastTriggered().get(); } private JobStatus.JobRun incompleteTriggeringEvent(Version version) { return new JobStatus.JobRun(-1, version, ApplicationVersion.unknown, false, "", clock.instant()); } private DeployOptions withVersion(Version version, DeployOptions options) { return new DeployOptions(options.screwdriverBuildJob, Optional.of(version), options.ignoreValidationErrors, options.deployCurrentVersion); } private ApplicationVersion toApplicationPackageRevision(ApplicationPackage applicationPackage, Optional<ScrewdriverBuildJob> buildJob) { if ( ! buildJob.isPresent()) return ApplicationVersion.from(applicationPackage.hash()); GitRevision gitRevision = buildJob.get().gitRevision; if (gitRevision.repository == null || gitRevision.branch == null || gitRevision.commit == null) return ApplicationVersion.from(applicationPackage.hash()); return ApplicationVersion.from(applicationPackage.hash(), new SourceRevision(gitRevision.repository.id(), gitRevision.branch.id(), gitRevision.commit.id())); } /** Register a DNS name for rotation */ private void registerRotationInDns(Rotation rotation, String dnsName) { try { Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName)); RecordData rotationName = RecordData.fqdn(rotation.name()); if (record.isPresent()) { if ( ! 
record.get().data().equals(rotationName)) { nameService.updateRecord(record.get().id(), rotationName); log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } else { RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName); log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '" + rotation.name() + "'"); } } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to register CNAME", e); } } /** Returns the endpoints of the deployment, or empty if obtaining them failed */ public Optional<InstanceEndpoints> getDeploymentEndpoints(DeploymentId deploymentId) { try { List<RoutingEndpoint> endpoints = routingGenerator.endpoints(deploymentId); List<URI> endPointUrls = new ArrayList<>(); for (RoutingEndpoint endpoint : endpoints) { try { endPointUrls.add(new URI(endpoint.getEndpoint())); } catch (URISyntaxException e) { throw new RuntimeException("Routing generator returned illegal url's", e); } } return Optional.of(new InstanceEndpoints(endPointUrls)); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } } /** * Deletes the the given application. All known instances of the applications will be deleted, * including PR instances. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if no instances of the application exist */ public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) { List<ApplicationId> instances = controller.applications().asList(applicationId.tenant()) .stream() .map(Application::id) .filter(id -> id.application().equals(applicationId.application()) && id.tenant().equals(applicationId.tenant())) .collect(Collectors.toList()); if (instances.isEmpty()) { throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found"); } instances.forEach(id -> lockOrThrow(id, application -> { if ( ! application.deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments"); Tenant tenant = controller.tenants().tenant(new TenantId(id.tenant().value())).get(); if (tenant.isAthensTenant() && ! token.isPresent()) throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided"); if (id.instance().isDefault() && tenant.isAthensTenant()) { zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get()) .deleteApplication(tenant.getAthensDomain().get(), new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value())); } db.deleteApplication(id); log.info("Deleted " + application); })); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { db.store(application); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. 
*/ public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. */ public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(require(applicationId), lock)); } } public void notifyJobCompletion(JobReport report) { if ( ! get(report.applicationId()).isPresent()) { log.log(Level.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } deploymentTrigger.triggerFromCompletion(report); } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param hostname If non-empty, restart will only be scheduled for this host */ public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) { try { configserverClient.restart(deploymentId, hostname); } catch (NoInstanceException e) { throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment"); } } /** Deactivate application in the given zone */ public void deactivate(Application application, ZoneId zone) { deactivate(application, zone, Optional.empty(), false); } /** Deactivate a known deployment of the given application */ public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) { deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired); } private void deactivate(Application 
application, ZoneId zone, Optional<Deployment> deployment, boolean requireThatDeploymentHasExpired) { if (requireThatDeploymentHasExpired && deployment.isPresent() && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant())) return; lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, ZoneId zone) { try { configserverClient.deactivate(new DeploymentId(application.id(), zone)); } catch (NoInstanceException ignored) { } return application.withoutDeploymentIn(zone); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } private ApplicationId dashToUnderscore(ApplicationId id) { return ApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"), id.instance().value()); } public ConfigServerClient configserverClient() { return configserverClient; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(ApplicationId application) { return curator.lock(application, Duration.ofMinutes(10)); } /** Returns whether a direct deployment to given zone is allowed */ private static boolean canDeployDirectlyTo(ZoneId zone, DeployOptions options) { return ! 
options.screwdriverBuildJob.isPresent() || options.screwdriverBuildJob.get().screwdriverId == null || zone.environment().isManuallyDeployed(); } /** Returns whether artifact for given version number is available in artifact repository */ private static boolean canDownloadArtifact(ApplicationVersion applicationVersion) { return applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent(); } /** Returns whether component has reported a version number that is availabe in artifact repository */ private static boolean canDownloadReportedApplicationVersion(Application application) { return Optional.ofNullable(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.component)) .flatMap(JobStatus::lastSuccess) .map(JobStatus.JobRun::applicationVersion) .filter(ApplicationController::canDownloadArtifact) .isPresent(); } /** Verify that each of the production zones listed in the deployment spec exist in this system. */ private void validate(DeploymentSpec deploymentSpec) { deploymentSpec.zones().stream() .filter(zone -> zone.environment() == Environment.prod) .forEach(zone -> { if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(), zone.region().orElse(null)))) throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!"); }); } public RotationRepository rotationRepository() { return rotationRepository; } }
I suggest just returning `Collections.emptyList` as this exception seems to always be ignored.
/**
 * Applies a single patch field to every child node and returns the patched children.
 * Only fields which must stay consistent between a host and its children are supported.
 *
 * @throws IllegalArgumentException if the field is not one that applies recursively
 */
private List<Node> applyFieldRecursive(List<Node> childNodes, String name, Inspector value) {
    if (name.equals(HARDWARE_FAILURE_DESCRIPTION))
        return childNodes.stream()
                         .map(child -> child.with(child.status().withHardwareFailureDescription(asOptionalString(value))))
                         .collect(Collectors.toList());
    else if (name.equals(WANT_TO_RETIRE))
        return childNodes.stream()
                         .map(child -> child.with(child.status().withWantToRetire(asBoolean(value))))
                         .collect(Collectors.toList());
    else
        throw new IllegalArgumentException("Field " + name + " is not recursive");
}
// No recursive handler exists for this field: tell the caller it applies to single nodes only.
throw new IllegalArgumentException("Field " + name + " is not recursive");
/**
 * Applies the given patch field to all child nodes and returns the resulting children.
 * Only a fixed subset of fields may be applied recursively.
 *
 * @throws IllegalArgumentException if the field is not one that can be applied recursively
 */
private List<Node> applyFieldRecursive(List<Node> childNodes, String name, Inspector value) {
    switch (name) {
        case HARDWARE_FAILURE_DESCRIPTION:
            return childNodes.stream()
                    .map(child -> child.with(child.status().withHardwareFailureDescription(asOptionalString(value))))
                    .collect(Collectors.toList());
        case WANT_TO_RETIRE:
            return childNodes.stream()
                    .map(child -> child.with(child.status().withWantToRetire(asBoolean(value))))
                    .collect(Collectors.toList());
        default :
            // Callers may catch this to detect "not a recursive field" — see apply() in NodePatcher.
            throw new IllegalArgumentException("Field " + name + " is not recursive");
    }
}
/**
 * Applies a JSON patch, read from an InputStream, to a node and — for the subset of
 * fields that must be kept consistent between a Docker host and its children — to the
 * node's child nodes as well. The patched nodes are returned from apply(); this class
 * writes nothing back to the repository itself.
 */
class NodePatcher {

    // Patch fields which are also applied recursively to child nodes (see applyFieldRecursive)
    private static final String HARDWARE_FAILURE_DESCRIPTION = "hardwareFailureDescription";
    private static final String WANT_TO_RETIRE = "wantToRetire";

    private final NodeFlavors nodeFlavors;
    private final Inspector inspector; // root of the parsed JSON request body
    private final NodeRepository nodeRepository;

    private Node node;                        // the node being patched, rebound as each field is applied
    private List<Node> children;              // children of this node when it is a host, otherwise empty
    private boolean childrenModified = false; // set once any field has been applied to the children

    /**
     * Creates a patcher for the given node by parsing the JSON request body.
     *
     * @throws RuntimeException if the request body cannot be read
     */
    public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, NodeRepository nodeRepository) {
        try {
            this.nodeFlavors = nodeFlavors;
            // Request body is capped at 1 MB
            inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
            this.node = node;
            this.nodeRepository = nodeRepository;
            this.children = node.type() == NodeType.host ? nodeRepository.getChildNodes(node.hostname()) : Collections.emptyList();
        } catch (IOException e) {
            throw new RuntimeException("Error reading request body", e);
        }
    }

    /**
     * Apply the json to the node and return all nodes affected by the patch.
     * More than 1 node may be affected if e.g. the node is a Docker host, which may have
     * children that must be updated in a consistent manner.
     */
    public List<Node> apply() {
        inspector.traverse((String name, Inspector value) -> {
            try {
                node = applyField(name, value);
            } catch (IllegalArgumentException e) {
                throw new IllegalArgumentException("Could not set field '" + name + "'", e);
            }
            try {
                children = applyFieldRecursive(children, name, value);
                childrenModified = true;
            } catch (IllegalArgumentException e) {
                // Thrown for fields which do not apply recursively: in that case only the
                // node itself is patched, so the exception is deliberately ignored.
            }
        } );
        List<Node> nodes = childrenModified ? new ArrayList<>(children) : new ArrayList<>();
        nodes.add(node);
        return nodes;
    }

    /**
     * Applies a single patch field to this.node and returns the resulting node.
     *
     * @throws IllegalArgumentException if the field is not modifiable or its value has the wrong type
     */
    private Node applyField(String name, Inspector value) {
        switch (name) {
            case "convergedStateVersion" :
                return node; // accepted but has no effect
            case "currentRebootGeneration" :
                return node.withCurrentRebootGeneration(asLong(value), nodeRepository.clock().instant());
            case "currentRestartGeneration" :
                return patchCurrentRestartGeneration(asLong(value));
            case "currentDockerImage" :
                // An empty image string maps to the empty version
                Version versionFromImage = Optional.of(asString(value))
                        .filter(s -> !s.isEmpty())
                        .map(DockerImage::new)
                        .map(DockerImage::tagAsVersion)
                        .orElse(Version.emptyVersion);
                return node.with(node.status().withVespaVersion(versionFromImage));
            case "currentVespaVersion" :
                return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
            case "currentHostedVersion" :
                return node; // accepted but has no effect
            case "failCount" :
                return node.with(node.status().setFailCount(asLong(value).intValue()));
            case "flavor" :
                return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
            case HARDWARE_FAILURE_DESCRIPTION:
                return node.with(node.status().withHardwareFailureDescription(asOptionalString(value)));
            case "parentHostname" :
                return node.withParentHostname(asString(value));
            case "ipAddresses" :
                return node.withIpAddresses(asStringSet(value));
            case "additionalIpAddresses" :
                return node.withAdditionalIpAddresses(asStringSet(value));
            case WANT_TO_RETIRE :
                return node.with(node.status().withWantToRetire(asBoolean(value)));
            case "wantToDeprovision" :
                return node.with(node.status().withWantToDeprovision(asBoolean(value)));
            case "hardwareDivergence" :
                return node.with(node.status().withHardwareDivergence(removeQuotedNulls(asOptionalString(value))));
            default :
                throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
        }
    }

    /**
     * Reads an ARRAY of STRING values into a sorted set.
     *
     * @throws IllegalArgumentException if the value, or any element of it, has the wrong type
     */
    private Set<String> asStringSet(Inspector field) {
        if ( ! field.type().equals(Type.ARRAY))
            throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());

        TreeSet<String> strings = new TreeSet<>();
        for (int i = 0; i < field.entries(); i++) {
            Inspector entry = field.entry(i);
            if ( ! entry.type().equals(Type.STRING))
                throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
            strings.add(entry.asString());
        }

        return strings;
    }

    /**
     * Sets the current restart generation on this node's allocation.
     *
     * @throws IllegalArgumentException if the node is not allocated
     */
    private Node patchCurrentRestartGeneration(Long value) {
        Optional<Allocation> allocation = node.allocation();
        if (allocation.isPresent())
            return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value)));
        else
            throw new IllegalArgumentException("Node is not allocated");
    }

    /** Returns the field value as a Long, or throws IllegalArgumentException if it is not a LONG. */
    private Long asLong(Inspector field) {
        if ( ! field.type().equals(Type.LONG))
            throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
        return field.asLong();
    }

    /** Returns the field value as a String, or throws IllegalArgumentException if it is not a STRING. */
    private String asString(Inspector field) {
        if ( ! field.type().equals(Type.STRING))
            throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
        return field.asString();
    }

    /** Returns the field value as a String, or empty if the field is JSON null (NIX). */
    private Optional<String> asOptionalString(Inspector field) {
        return field.type().equals(Type.NIX) ? Optional.empty() : Optional.of(asString(field));
    }

    /** Maps the literal string "null" to an empty Optional. */
    private Optional<String> removeQuotedNulls(Optional<String> value) {
        return value.filter(v -> !v.equals("null"));
    }

    /** Returns the field value as a boolean, or throws IllegalArgumentException if it is not a BOOL. */
    private boolean asBoolean(Inspector field) {
        if ( ! field.type().equals(Type.BOOL))
            throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
        return field.asBool();
    }
}
/**
 * Applies a JSON patch, read from an InputStream, to a node and — for the subset of
 * fields that must be kept consistent between a Docker host and its children — to the
 * node's child nodes as well. The patched nodes are returned from apply(); this class
 * writes nothing back to the repository itself.
 */
class NodePatcher {

    // Patch fields which are also applied recursively to child nodes (see applyFieldRecursive)
    private static final String HARDWARE_FAILURE_DESCRIPTION = "hardwareFailureDescription";
    private static final String WANT_TO_RETIRE = "wantToRetire";

    private final NodeFlavors nodeFlavors;
    private final Inspector inspector; // root of the parsed JSON request body
    private final NodeRepository nodeRepository;

    private Node node;                        // the node being patched, rebound as each field is applied
    private List<Node> children;              // children of this node when it is a host, otherwise empty
    private boolean childrenModified = false; // set once any field has been applied to the children

    /**
     * Creates a patcher for the given node by parsing the JSON request body.
     *
     * @throws RuntimeException if the request body cannot be read
     */
    public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, NodeRepository nodeRepository) {
        try {
            this.nodeFlavors = nodeFlavors;
            // Request body is capped at 1 MB
            inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
            this.node = node;
            this.nodeRepository = nodeRepository;
            this.children = node.type() == NodeType.host ? nodeRepository.getChildNodes(node.hostname()) : Collections.emptyList();
        } catch (IOException e) {
            throw new RuntimeException("Error reading request body", e);
        }
    }

    /**
     * Apply the json to the node and return all nodes affected by the patch.
     * More than 1 node may be affected if e.g. the node is a Docker host, which may have
     * children that must be updated in a consistent manner.
     */
    public List<Node> apply() {
        inspector.traverse((String name, Inspector value) -> {
            try {
                node = applyField(name, value);
            } catch (IllegalArgumentException e) {
                throw new IllegalArgumentException("Could not set field '" + name + "'", e);
            }
            try {
                children = applyFieldRecursive(children, name, value);
                childrenModified = true;
            } catch (IllegalArgumentException ignored) {
                // The field does not apply recursively; only the node itself is patched.
                // This is expected for most fields, so the exception is intentionally dropped.
            }
        } );
        List<Node> nodes = childrenModified ? new ArrayList<>(children) : new ArrayList<>();
        nodes.add(node);
        return nodes;
    }

    /**
     * Applies a single patch field to this.node and returns the resulting node.
     *
     * @throws IllegalArgumentException if the field is not modifiable or its value has the wrong type
     */
    private Node applyField(String name, Inspector value) {
        switch (name) {
            case "convergedStateVersion" :
                return node; // accepted but has no effect
            case "currentRebootGeneration" :
                return node.withCurrentRebootGeneration(asLong(value), nodeRepository.clock().instant());
            case "currentRestartGeneration" :
                return patchCurrentRestartGeneration(asLong(value));
            case "currentDockerImage" :
                // An empty image string maps to the empty version
                Version versionFromImage = Optional.of(asString(value))
                        .filter(s -> !s.isEmpty())
                        .map(DockerImage::new)
                        .map(DockerImage::tagAsVersion)
                        .orElse(Version.emptyVersion);
                return node.with(node.status().withVespaVersion(versionFromImage));
            case "currentVespaVersion" :
                return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
            case "currentHostedVersion" :
                return node; // accepted but has no effect
            case "failCount" :
                return node.with(node.status().setFailCount(asLong(value).intValue()));
            case "flavor" :
                return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
            case HARDWARE_FAILURE_DESCRIPTION:
                return node.with(node.status().withHardwareFailureDescription(asOptionalString(value)));
            case "parentHostname" :
                return node.withParentHostname(asString(value));
            case "ipAddresses" :
                return node.withIpAddresses(asStringSet(value));
            case "additionalIpAddresses" :
                return node.withAdditionalIpAddresses(asStringSet(value));
            case WANT_TO_RETIRE :
                return node.with(node.status().withWantToRetire(asBoolean(value)));
            case "wantToDeprovision" :
                return node.with(node.status().withWantToDeprovision(asBoolean(value)));
            case "hardwareDivergence" :
                return node.with(node.status().withHardwareDivergence(removeQuotedNulls(asOptionalString(value))));
            default :
                throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
        }
    }

    /**
     * Reads an ARRAY of STRING values into a sorted set.
     *
     * @throws IllegalArgumentException if the value, or any element of it, has the wrong type
     */
    private Set<String> asStringSet(Inspector field) {
        if ( ! field.type().equals(Type.ARRAY))
            throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());

        TreeSet<String> strings = new TreeSet<>();
        for (int i = 0; i < field.entries(); i++) {
            Inspector entry = field.entry(i);
            if ( ! entry.type().equals(Type.STRING))
                throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
            strings.add(entry.asString());
        }

        return strings;
    }

    /**
     * Sets the current restart generation on this node's allocation.
     *
     * @throws IllegalArgumentException if the node is not allocated
     */
    private Node patchCurrentRestartGeneration(Long value) {
        Optional<Allocation> allocation = node.allocation();
        if (allocation.isPresent())
            return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value)));
        else
            throw new IllegalArgumentException("Node is not allocated");
    }

    /** Returns the field value as a Long, or throws IllegalArgumentException if it is not a LONG. */
    private Long asLong(Inspector field) {
        if ( ! field.type().equals(Type.LONG))
            throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
        return field.asLong();
    }

    /** Returns the field value as a String, or throws IllegalArgumentException if it is not a STRING. */
    private String asString(Inspector field) {
        if ( ! field.type().equals(Type.STRING))
            throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
        return field.asString();
    }

    /** Returns the field value as a String, or empty if the field is JSON null (NIX). */
    private Optional<String> asOptionalString(Inspector field) {
        return field.type().equals(Type.NIX) ? Optional.empty() : Optional.of(asString(field));
    }

    /** Maps the literal string "null" to an empty Optional. */
    private Optional<String> removeQuotedNulls(Optional<String> value) {
        return value.filter(v -> !v.equals("null"));
    }

    /** Returns the field value as a boolean, or throws IllegalArgumentException if it is not a BOOL. */
    private boolean asBoolean(Inspector field) {
        if ( ! field.type().equals(Type.BOOL))
            throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
        return field.asBool();
    }
}
Yes, but that would require that we save the return in a temporary variable and only overwrite `nodes` if the temporary variable is not empty. I prefer it this way because at least this one has some symmetry with the `applyField()` call above.
/**
 * Patches one field on every child node and returns the resulting children.
 * Supports only the fields that must stay consistent between a host and its children.
 *
 * @throws IllegalArgumentException if the field cannot be applied recursively
 */
private List<Node> applyFieldRecursive(List<Node> childNodes, String name, Inspector value) {
    switch (name) {
        case HARDWARE_FAILURE_DESCRIPTION: {
            List<Node> patched = new ArrayList<>(childNodes.size());
            for (Node child : childNodes)
                patched.add(child.with(child.status().withHardwareFailureDescription(asOptionalString(value))));
            return patched;
        }
        case WANT_TO_RETIRE: {
            List<Node> patched = new ArrayList<>(childNodes.size());
            for (Node child : childNodes)
                patched.add(child.with(child.status().withWantToRetire(asBoolean(value))));
            return patched;
        }
        default:
            throw new IllegalArgumentException("Field " + name + " is not recursive");
    }
}
// No recursive handler exists for this field: tell the caller it applies to single nodes only.
throw new IllegalArgumentException("Field " + name + " is not recursive");
/**
 * Applies the given patch field to each child node and returns the updated children.
 * Only the whitelisted recursive fields are accepted.
 *
 * @throws IllegalArgumentException if the field does not apply recursively
 */
private List<Node> applyFieldRecursive(List<Node> childNodes, String name, Inspector value) {
    switch (name) {
        case HARDWARE_FAILURE_DESCRIPTION:
            return childNodes.stream()
                    .map(child -> child.with(child.status().withHardwareFailureDescription(asOptionalString(value))))
                    .collect(Collectors.toList());
        case WANT_TO_RETIRE:
            return childNodes.stream()
                    .map(child -> child.with(child.status().withWantToRetire(asBoolean(value))))
                    .collect(Collectors.toList());
        default :
            // Signals "not a recursive field" to callers which catch IllegalArgumentException.
            throw new IllegalArgumentException("Field " + name + " is not recursive");
    }
}
/**
 * Parses a JSON patch request and applies it to a node, and to the node's children
 * when the node is a Docker host. The affected nodes are returned from apply();
 * persisting them is left to the caller.
 */
class NodePatcher {

    // Fields which are also applied to child nodes, see applyFieldRecursive()
    private static final String HARDWARE_FAILURE_DESCRIPTION = "hardwareFailureDescription";
    private static final String WANT_TO_RETIRE = "wantToRetire";

    private final NodeFlavors nodeFlavors;
    private final Inspector inspector;
    private final NodeRepository nodeRepository;

    private Node node;
    private List<Node> children;
    private boolean childrenModified = false;

    /** Reads and parses the JSON request body and captures the node to patch. */
    public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, NodeRepository nodeRepository) {
        try {
            byte[] requestBody = IOUtils.readBytes(json, 1000 * 1000);
            this.nodeFlavors = nodeFlavors;
            this.inspector = SlimeUtils.jsonToSlime(requestBody).get();
            this.node = node;
            this.nodeRepository = nodeRepository;
            if (node.type() == NodeType.host)
                this.children = nodeRepository.getChildNodes(node.hostname());
            else
                this.children = Collections.emptyList();
        } catch (IOException e) {
            throw new RuntimeException("Error reading request body", e);
        }
    }

    /**
     * Apply the json to the node and return all nodes affected by the patch.
     * More than 1 node may be affected if e.g. the node is a Docker host, which may have
     * children that must be updated in a consistent manner.
     */
    public List<Node> apply() {
        inspector.traverse((String name, Inspector value) -> {
            try {
                node = applyField(name, value);
            } catch (IllegalArgumentException e) {
                throw new IllegalArgumentException("Could not set field '" + name + "'", e);
            }
            try {
                children = applyFieldRecursive(children, name, value);
                childrenModified = true;
            } catch (IllegalArgumentException e) {
                // the field is not recursive — children are left untouched
            }
        });
        List<Node> affected = new ArrayList<>();
        if (childrenModified)
            affected.addAll(children);
        affected.add(node);
        return affected;
    }

    /** Applies one patch field to this.node and returns the result. */
    private Node applyField(String name, Inspector value) {
        switch (name) {
            case "convergedStateVersion":
                return node;
            case "currentRebootGeneration":
                return node.withCurrentRebootGeneration(asLong(value), nodeRepository.clock().instant());
            case "currentRestartGeneration":
                return patchCurrentRestartGeneration(asLong(value));
            case "currentDockerImage": {
                String imageName = asString(value);
                Version imageVersion = imageName.isEmpty()
                        ? Version.emptyVersion
                        : new DockerImage(imageName).tagAsVersion();
                return node.with(node.status().withVespaVersion(imageVersion));
            }
            case "currentVespaVersion":
                return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
            case "currentHostedVersion":
                return node;
            case "failCount":
                return node.with(node.status().setFailCount(asLong(value).intValue()));
            case "flavor":
                return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
            case HARDWARE_FAILURE_DESCRIPTION:
                return node.with(node.status().withHardwareFailureDescription(asOptionalString(value)));
            case "parentHostname":
                return node.withParentHostname(asString(value));
            case "ipAddresses":
                return node.withIpAddresses(asStringSet(value));
            case "additionalIpAddresses":
                return node.withAdditionalIpAddresses(asStringSet(value));
            case WANT_TO_RETIRE:
                return node.with(node.status().withWantToRetire(asBoolean(value)));
            case "wantToDeprovision":
                return node.with(node.status().withWantToDeprovision(asBoolean(value)));
            case "hardwareDivergence":
                return node.with(node.status().withHardwareDivergence(removeQuotedNulls(asOptionalString(value))));
            default:
                throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
        }
    }

    /** Converts an ARRAY of STRING values into a sorted set, rejecting any other types. */
    private Set<String> asStringSet(Inspector field) {
        if (field.type().equals(Type.ARRAY)) {
            TreeSet<String> result = new TreeSet<>();
            for (int index = 0; index < field.entries(); index++) {
                Inspector element = field.entry(index);
                if ( ! element.type().equals(Type.STRING))
                    throw new IllegalArgumentException("Expected a STRING value, got a " + element.type());
                result.add(element.asString());
            }
            return result;
        }
        throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
    }

    /** Sets the current restart generation of this node's allocation; fails if the node is unallocated. */
    private Node patchCurrentRestartGeneration(Long value) {
        Allocation allocation = node.allocation()
                                    .orElseThrow(() -> new IllegalArgumentException("Node is not allocated"));
        return node.with(allocation.withRestart(allocation.restartGeneration().withCurrent(value)));
    }

    /** Reads the value as a Long, rejecting any other type. */
    private Long asLong(Inspector field) {
        if (field.type().equals(Type.LONG))
            return field.asLong();
        throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
    }

    /** Reads the value as a String, rejecting any other type. */
    private String asString(Inspector field) {
        if (field.type().equals(Type.STRING))
            return field.asString();
        throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
    }

    /** Reads the value as a String, treating JSON null (NIX) as empty. */
    private Optional<String> asOptionalString(Inspector field) {
        if (field.type().equals(Type.NIX))
            return Optional.empty();
        return Optional.of(asString(field));
    }

    /** Maps the literal string "null" to an empty Optional. */
    private Optional<String> removeQuotedNulls(Optional<String> value) {
        return value.filter(text -> ! text.equals("null"));
    }

    /** Reads the value as a boolean, rejecting any other type. */
    private boolean asBoolean(Inspector field) {
        if (field.type().equals(Type.BOOL))
            return field.asBool();
        throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
    }
}
class NodePatcher { private static final String HARDWARE_FAILURE_DESCRIPTION = "hardwareFailureDescription"; private static final String WANT_TO_RETIRE = "wantToRetire"; private final NodeFlavors nodeFlavors; private final Inspector inspector; private final NodeRepository nodeRepository; private Node node; private List<Node> children; private boolean childrenModified = false; public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, NodeRepository nodeRepository) { try { this.nodeFlavors = nodeFlavors; inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get(); this.node = node; this.nodeRepository = nodeRepository; this.children = node.type() == NodeType.host ? nodeRepository.getChildNodes(node.hostname()) : Collections.emptyList(); } catch (IOException e) { throw new RuntimeException("Error reading request body", e); } } /** * Apply the json to the node and return all nodes affected by the patch. * More than 1 node may be affected if e.g. the node is a Docker host, which may have * children that must be updated in a consistent manner. */ public List<Node> apply() { inspector.traverse((String name, Inspector value) -> { try { node = applyField(name, value); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Could not set field '" + name + "'", e); } try { children = applyFieldRecursive(children, name, value); childrenModified = true; } catch (IllegalArgumentException e) { } } ); List<Node> nodes = childrenModified ? 
new ArrayList<>(children) : new ArrayList<>(); nodes.add(node); return nodes; } private Node applyField(String name, Inspector value) { switch (name) { case "convergedStateVersion" : return node; case "currentRebootGeneration" : return node.withCurrentRebootGeneration(asLong(value), nodeRepository.clock().instant()); case "currentRestartGeneration" : return patchCurrentRestartGeneration(asLong(value)); case "currentDockerImage" : Version versionFromImage = Optional.of(asString(value)) .filter(s -> !s.isEmpty()) .map(DockerImage::new) .map(DockerImage::tagAsVersion) .orElse(Version.emptyVersion); return node.with(node.status().withVespaVersion(versionFromImage)); case "currentVespaVersion" : return node.with(node.status().withVespaVersion(Version.fromString(asString(value)))); case "currentHostedVersion" : return node; case "failCount" : return node.with(node.status().setFailCount(asLong(value).intValue())); case "flavor" : return node.with(nodeFlavors.getFlavorOrThrow(asString(value))); case HARDWARE_FAILURE_DESCRIPTION: return node.with(node.status().withHardwareFailureDescription(asOptionalString(value))); case "parentHostname" : return node.withParentHostname(asString(value)); case "ipAddresses" : return node.withIpAddresses(asStringSet(value)); case "additionalIpAddresses" : return node.withAdditionalIpAddresses(asStringSet(value)); case WANT_TO_RETIRE : return node.with(node.status().withWantToRetire(asBoolean(value))); case "wantToDeprovision" : return node.with(node.status().withWantToDeprovision(asBoolean(value))); case "hardwareDivergence" : return node.with(node.status().withHardwareDivergence(removeQuotedNulls(asOptionalString(value)))); default : throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field"); } } private Set<String> asStringSet(Inspector field) { if ( ! 
field.type().equals(Type.ARRAY)) throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type()); TreeSet<String> strings = new TreeSet<>(); for (int i = 0; i < field.entries(); i++) { Inspector entry = field.entry(i); if ( ! entry.type().equals(Type.STRING)) throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type()); strings.add(entry.asString()); } return strings; } private Node patchCurrentRestartGeneration(Long value) { Optional<Allocation> allocation = node.allocation(); if (allocation.isPresent()) return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value))); else throw new IllegalArgumentException("Node is not allocated"); } private Long asLong(Inspector field) { if ( ! field.type().equals(Type.LONG)) throw new IllegalArgumentException("Expected a LONG value, got a " + field.type()); return field.asLong(); } private String asString(Inspector field) { if ( ! field.type().equals(Type.STRING)) throw new IllegalArgumentException("Expected a STRING value, got a " + field.type()); return field.asString(); } private Optional<String> asOptionalString(Inspector field) { return field.type().equals(Type.NIX) ? Optional.empty() : Optional.of(asString(field)); } private Optional<String> removeQuotedNulls(Optional<String> value) { return value.filter(v -> !v.equals("null")); } private boolean asBoolean(Inspector field) { if ( ! field.type().equals(Type.BOOL)) throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type()); return field.asBool(); } }
How about we define the content of this instead of passing JSON through? I suggest a mandatory "type" and "message" field and an optional "details" field, as that can be mapped cleanly to ErrorMessage.
private void fill(List<FastHit> hits, byte[] slimeBytes) { com.yahoo.slime.Inspector root = BinaryFormat.decode(slimeBytes).get(); Inspector summaries = new SlimeAdapter(root.field("docsums")); if ( ! summaries.valid()) throw new IllegalArgumentException("Expected a Slime root object containing a 'docsums' field"); for (int i = 0; i < hits.size(); i++) { fill(hits.get(i), summaries.entry(i).field("docsum")); } com.yahoo.slime.Inspector errors = root.field("errors"); errors.traverse((ArrayTraverser)(int index, com.yahoo.slime.Inspector value) -> { ByteArrayOutputStream os = new ByteArrayOutputStream(1024); try { new JsonFormat(true).encode(os, value); } catch (IOException e) { throw new IllegalArgumentException(e); } if ( "timeout".equalsIgnoreCase(value.field("type").asString())) { result.hits().addError(ErrorMessage.createTimeout(Utf8.toString(os.toByteArray()))); } else { result.hits().addError(ErrorMessage.createUnspecifiedError(Utf8.toString(os.toByteArray()))); } }); }
new JsonFormat(true).encode(os, value);
private void fill(List<FastHit> hits, byte[] slimeBytes) { com.yahoo.slime.Inspector root = BinaryFormat.decode(slimeBytes).get(); com.yahoo.slime.Inspector errors = root.field("errors"); boolean hasErrors = errors.valid() && (errors.entries() > 0); if (hasErrors) { addErrors(errors); } Inspector summaries = new SlimeAdapter(root.field("docsums")); if ( ! summaries.valid() && !hasErrors) throw new IllegalArgumentException("Expected a Slime root object containing a 'docsums' field"); for (int i = 0; i < hits.size(); i++) { fill(hits.get(i), summaries.entry(i).field("docsum")); } }
class GetDocsumsResponseReceiver { private final BlockingQueue<Client.GetDocsumsResponseOrError> responses; private final Compressor compressor; private final Result result; /** Whether we have already logged/notified about an error - to avoid spamming */ private boolean hasReportedError = false; /** The number of responses we should receive (and process) before this is complete */ private int outstandingResponses; public GetDocsumsResponseReceiver(int requestCount, Compressor compressor, Result result) { this.compressor = compressor; responses = new LinkedBlockingQueue<>(requestCount); outstandingResponses = requestCount; this.result = result; } /** Called by a thread belonging to the client when a valid response becomes available */ public void receive(Client.GetDocsumsResponseOrError response) { responses.add(response); } private void throwTimeout() throws TimeoutException { throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding."); } /** * Call this from the dispatcher thread to initiate and complete processing of responses. * This will block until all responses are available and processed, or to timeout. 
*/ public void processResponses(Query query) throws TimeoutException { try { while (outstandingResponses > 0) { long timeLeftMs = query.getTimeLeft(); if (timeLeftMs <= 0) { throwTimeout(); } Client.GetDocsumsResponseOrError response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS); if (response == null) throwTimeout(); processResponse(response); outstandingResponses--; } } catch (InterruptedException e) { } } private void processResponse(Client.GetDocsumsResponseOrError responseOrError) { if (responseOrError.error().isPresent()) { if (hasReportedError) return; String error = responseOrError.error().get(); result.hits().addError(ErrorMessage.createBackendCommunicationError(error)); log.log(Level.WARNING, "Error fetching summary data: "+ error); } else { Client.GetDocsumsResponse response = responseOrError.response().get(); CompressionType compression = CompressionType.valueOf(response.compression()); byte[] slimeBytes = compressor.decompress(response.compressedSlimeBytes(), compression, response.uncompressedSize()); fill(response.hitsContext(), slimeBytes); } } private void fill(FastHit hit, Inspector summary) { hit.reserve(summary.fieldCount()); summary.traverse((String name, Inspector value) -> { hit.setField(name, nativeTypeOf(value)); }); } private Object nativeTypeOf(Inspector inspector) { switch (inspector.type()) { case ARRAY: return inspector; case OBJECT: return inspector; case BOOL: return inspector.asBool(); case DATA: return inspector.asData(); case DOUBLE: return inspector.asDouble(); case LONG: return inspector.asLong(); case STRING: return inspector.asString(); case EMPTY : return null; default: throw new IllegalArgumentException("Unexpected Slime type " + inspector.type()); } } }
class GetDocsumsResponseReceiver { private final BlockingQueue<Client.GetDocsumsResponseOrError> responses; private final Compressor compressor; private final Result result; /** Whether we have already logged/notified about an error - to avoid spamming */ private boolean hasReportedError = false; /** The number of responses we should receive (and process) before this is complete */ private int outstandingResponses; public GetDocsumsResponseReceiver(int requestCount, Compressor compressor, Result result) { this.compressor = compressor; responses = new LinkedBlockingQueue<>(requestCount); outstandingResponses = requestCount; this.result = result; } /** Called by a thread belonging to the client when a valid response becomes available */ public void receive(Client.GetDocsumsResponseOrError response) { responses.add(response); } private void throwTimeout() throws TimeoutException { throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding."); } /** * Call this from the dispatcher thread to initiate and complete processing of responses. * This will block until all responses are available and processed, or to timeout. 
*/ public void processResponses(Query query) throws TimeoutException { try { while (outstandingResponses > 0) { long timeLeftMs = query.getTimeLeft(); if (timeLeftMs <= 0) { throwTimeout(); } Client.GetDocsumsResponseOrError response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS); if (response == null) throwTimeout(); processResponse(response); outstandingResponses--; } } catch (InterruptedException e) { } } private void processResponse(Client.GetDocsumsResponseOrError responseOrError) { if (responseOrError.error().isPresent()) { if (hasReportedError) return; String error = responseOrError.error().get(); result.hits().addError(ErrorMessage.createBackendCommunicationError(error)); log.log(Level.WARNING, "Error fetching summary data: "+ error); } else { Client.GetDocsumsResponse response = responseOrError.response().get(); CompressionType compression = CompressionType.valueOf(response.compression()); byte[] slimeBytes = compressor.decompress(response.compressedSlimeBytes(), compression, response.uncompressedSize()); fill(response.hitsContext(), slimeBytes); } } private void addErrors(com.yahoo.slime.Inspector errors) { errors.traverse((ArrayTraverser) (int index, com.yahoo.slime.Inspector value) -> { int errorCode = ("timeout".equalsIgnoreCase(value.field("type").asString())) ? 
Error.TIMEOUT.code : Error.UNSPECIFIED.code; result.hits().addError(new ErrorMessage(errorCode, value.field("message").asString(), value.field("details").asString())); }); } private void fill(FastHit hit, Inspector summary) { hit.reserve(summary.fieldCount()); summary.traverse((String name, Inspector value) -> { hit.setField(name, nativeTypeOf(value)); }); } private Object nativeTypeOf(Inspector inspector) { switch (inspector.type()) { case ARRAY: return inspector; case OBJECT: return inspector; case BOOL: return inspector.asBool(); case DATA: return inspector.asData(); case DOUBLE: return inspector.asDouble(); case LONG: return inspector.asLong(); case STRING: return inspector.asString(); case EMPTY : return null; default: throw new IllegalArgumentException("Unexpected Slime type " + inspector.type()); } } }
This will lose the error information if we ever return only errors. It would be (slightly) more robust to parse errors first and then summaries, and only fail the latter if there are no errors already.
private void fill(List<FastHit> hits, byte[] slimeBytes) { com.yahoo.slime.Inspector root = BinaryFormat.decode(slimeBytes).get(); Inspector summaries = new SlimeAdapter(root.field("docsums")); if ( ! summaries.valid()) throw new IllegalArgumentException("Expected a Slime root object containing a 'docsums' field"); for (int i = 0; i < hits.size(); i++) { fill(hits.get(i), summaries.entry(i).field("docsum")); } com.yahoo.slime.Inspector errors = root.field("errors"); errors.traverse((ArrayTraverser)(int index, com.yahoo.slime.Inspector value) -> { ByteArrayOutputStream os = new ByteArrayOutputStream(1024); try { new JsonFormat(true).encode(os, value); } catch (IOException e) { throw new IllegalArgumentException(e); } if ( "timeout".equalsIgnoreCase(value.field("type").asString())) { result.hits().addError(ErrorMessage.createTimeout(Utf8.toString(os.toByteArray()))); } else { result.hits().addError(ErrorMessage.createUnspecifiedError(Utf8.toString(os.toByteArray()))); } }); }
if ( ! summaries.valid())
private void fill(List<FastHit> hits, byte[] slimeBytes) { com.yahoo.slime.Inspector root = BinaryFormat.decode(slimeBytes).get(); com.yahoo.slime.Inspector errors = root.field("errors"); boolean hasErrors = errors.valid() && (errors.entries() > 0); if (hasErrors) { addErrors(errors); } Inspector summaries = new SlimeAdapter(root.field("docsums")); if ( ! summaries.valid() && !hasErrors) throw new IllegalArgumentException("Expected a Slime root object containing a 'docsums' field"); for (int i = 0; i < hits.size(); i++) { fill(hits.get(i), summaries.entry(i).field("docsum")); } }
class GetDocsumsResponseReceiver { private final BlockingQueue<Client.GetDocsumsResponseOrError> responses; private final Compressor compressor; private final Result result; /** Whether we have already logged/notified about an error - to avoid spamming */ private boolean hasReportedError = false; /** The number of responses we should receive (and process) before this is complete */ private int outstandingResponses; public GetDocsumsResponseReceiver(int requestCount, Compressor compressor, Result result) { this.compressor = compressor; responses = new LinkedBlockingQueue<>(requestCount); outstandingResponses = requestCount; this.result = result; } /** Called by a thread belonging to the client when a valid response becomes available */ public void receive(Client.GetDocsumsResponseOrError response) { responses.add(response); } private void throwTimeout() throws TimeoutException { throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding."); } /** * Call this from the dispatcher thread to initiate and complete processing of responses. * This will block until all responses are available and processed, or to timeout. 
*/ public void processResponses(Query query) throws TimeoutException { try { while (outstandingResponses > 0) { long timeLeftMs = query.getTimeLeft(); if (timeLeftMs <= 0) { throwTimeout(); } Client.GetDocsumsResponseOrError response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS); if (response == null) throwTimeout(); processResponse(response); outstandingResponses--; } } catch (InterruptedException e) { } } private void processResponse(Client.GetDocsumsResponseOrError responseOrError) { if (responseOrError.error().isPresent()) { if (hasReportedError) return; String error = responseOrError.error().get(); result.hits().addError(ErrorMessage.createBackendCommunicationError(error)); log.log(Level.WARNING, "Error fetching summary data: "+ error); } else { Client.GetDocsumsResponse response = responseOrError.response().get(); CompressionType compression = CompressionType.valueOf(response.compression()); byte[] slimeBytes = compressor.decompress(response.compressedSlimeBytes(), compression, response.uncompressedSize()); fill(response.hitsContext(), slimeBytes); } } private void fill(FastHit hit, Inspector summary) { hit.reserve(summary.fieldCount()); summary.traverse((String name, Inspector value) -> { hit.setField(name, nativeTypeOf(value)); }); } private Object nativeTypeOf(Inspector inspector) { switch (inspector.type()) { case ARRAY: return inspector; case OBJECT: return inspector; case BOOL: return inspector.asBool(); case DATA: return inspector.asData(); case DOUBLE: return inspector.asDouble(); case LONG: return inspector.asLong(); case STRING: return inspector.asString(); case EMPTY : return null; default: throw new IllegalArgumentException("Unexpected Slime type " + inspector.type()); } } }
class GetDocsumsResponseReceiver { private final BlockingQueue<Client.GetDocsumsResponseOrError> responses; private final Compressor compressor; private final Result result; /** Whether we have already logged/notified about an error - to avoid spamming */ private boolean hasReportedError = false; /** The number of responses we should receive (and process) before this is complete */ private int outstandingResponses; public GetDocsumsResponseReceiver(int requestCount, Compressor compressor, Result result) { this.compressor = compressor; responses = new LinkedBlockingQueue<>(requestCount); outstandingResponses = requestCount; this.result = result; } /** Called by a thread belonging to the client when a valid response becomes available */ public void receive(Client.GetDocsumsResponseOrError response) { responses.add(response); } private void throwTimeout() throws TimeoutException { throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding."); } /** * Call this from the dispatcher thread to initiate and complete processing of responses. * This will block until all responses are available and processed, or to timeout. 
*/ public void processResponses(Query query) throws TimeoutException { try { while (outstandingResponses > 0) { long timeLeftMs = query.getTimeLeft(); if (timeLeftMs <= 0) { throwTimeout(); } Client.GetDocsumsResponseOrError response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS); if (response == null) throwTimeout(); processResponse(response); outstandingResponses--; } } catch (InterruptedException e) { } } private void processResponse(Client.GetDocsumsResponseOrError responseOrError) { if (responseOrError.error().isPresent()) { if (hasReportedError) return; String error = responseOrError.error().get(); result.hits().addError(ErrorMessage.createBackendCommunicationError(error)); log.log(Level.WARNING, "Error fetching summary data: "+ error); } else { Client.GetDocsumsResponse response = responseOrError.response().get(); CompressionType compression = CompressionType.valueOf(response.compression()); byte[] slimeBytes = compressor.decompress(response.compressedSlimeBytes(), compression, response.uncompressedSize()); fill(response.hitsContext(), slimeBytes); } } private void addErrors(com.yahoo.slime.Inspector errors) { errors.traverse((ArrayTraverser) (int index, com.yahoo.slime.Inspector value) -> { int errorCode = ("timeout".equalsIgnoreCase(value.field("type").asString())) ? 
Error.TIMEOUT.code : Error.UNSPECIFIED.code; result.hits().addError(new ErrorMessage(errorCode, value.field("message").asString(), value.field("details").asString())); }); } private void fill(FastHit hit, Inspector summary) { hit.reserve(summary.fieldCount()); summary.traverse((String name, Inspector value) -> { hit.setField(name, nativeTypeOf(value)); }); } private Object nativeTypeOf(Inspector inspector) { switch (inspector.type()) { case ARRAY: return inspector; case OBJECT: return inspector; case BOOL: return inspector.asBool(); case DATA: return inspector.asData(); case DOUBLE: return inspector.asDouble(); case LONG: return inspector.asLong(); case STRING: return inspector.asString(); case EMPTY : return null; default: throw new IllegalArgumentException("Unexpected Slime type " + inspector.type()); } } }
Done.
private void fill(List<FastHit> hits, byte[] slimeBytes) { com.yahoo.slime.Inspector root = BinaryFormat.decode(slimeBytes).get(); Inspector summaries = new SlimeAdapter(root.field("docsums")); if ( ! summaries.valid()) throw new IllegalArgumentException("Expected a Slime root object containing a 'docsums' field"); for (int i = 0; i < hits.size(); i++) { fill(hits.get(i), summaries.entry(i).field("docsum")); } com.yahoo.slime.Inspector errors = root.field("errors"); errors.traverse((ArrayTraverser)(int index, com.yahoo.slime.Inspector value) -> { ByteArrayOutputStream os = new ByteArrayOutputStream(1024); try { new JsonFormat(true).encode(os, value); } catch (IOException e) { throw new IllegalArgumentException(e); } if ( "timeout".equalsIgnoreCase(value.field("type").asString())) { result.hits().addError(ErrorMessage.createTimeout(Utf8.toString(os.toByteArray()))); } else { result.hits().addError(ErrorMessage.createUnspecifiedError(Utf8.toString(os.toByteArray()))); } }); }
new JsonFormat(true).encode(os, value);
private void fill(List<FastHit> hits, byte[] slimeBytes) { com.yahoo.slime.Inspector root = BinaryFormat.decode(slimeBytes).get(); com.yahoo.slime.Inspector errors = root.field("errors"); boolean hasErrors = errors.valid() && (errors.entries() > 0); if (hasErrors) { addErrors(errors); } Inspector summaries = new SlimeAdapter(root.field("docsums")); if ( ! summaries.valid() && !hasErrors) throw new IllegalArgumentException("Expected a Slime root object containing a 'docsums' field"); for (int i = 0; i < hits.size(); i++) { fill(hits.get(i), summaries.entry(i).field("docsum")); } }
class GetDocsumsResponseReceiver { private final BlockingQueue<Client.GetDocsumsResponseOrError> responses; private final Compressor compressor; private final Result result; /** Whether we have already logged/notified about an error - to avoid spamming */ private boolean hasReportedError = false; /** The number of responses we should receive (and process) before this is complete */ private int outstandingResponses; public GetDocsumsResponseReceiver(int requestCount, Compressor compressor, Result result) { this.compressor = compressor; responses = new LinkedBlockingQueue<>(requestCount); outstandingResponses = requestCount; this.result = result; } /** Called by a thread belonging to the client when a valid response becomes available */ public void receive(Client.GetDocsumsResponseOrError response) { responses.add(response); } private void throwTimeout() throws TimeoutException { throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding."); } /** * Call this from the dispatcher thread to initiate and complete processing of responses. * This will block until all responses are available and processed, or to timeout. 
*/ public void processResponses(Query query) throws TimeoutException { try { while (outstandingResponses > 0) { long timeLeftMs = query.getTimeLeft(); if (timeLeftMs <= 0) { throwTimeout(); } Client.GetDocsumsResponseOrError response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS); if (response == null) throwTimeout(); processResponse(response); outstandingResponses--; } } catch (InterruptedException e) { } } private void processResponse(Client.GetDocsumsResponseOrError responseOrError) { if (responseOrError.error().isPresent()) { if (hasReportedError) return; String error = responseOrError.error().get(); result.hits().addError(ErrorMessage.createBackendCommunicationError(error)); log.log(Level.WARNING, "Error fetching summary data: "+ error); } else { Client.GetDocsumsResponse response = responseOrError.response().get(); CompressionType compression = CompressionType.valueOf(response.compression()); byte[] slimeBytes = compressor.decompress(response.compressedSlimeBytes(), compression, response.uncompressedSize()); fill(response.hitsContext(), slimeBytes); } } private void fill(FastHit hit, Inspector summary) { hit.reserve(summary.fieldCount()); summary.traverse((String name, Inspector value) -> { hit.setField(name, nativeTypeOf(value)); }); } private Object nativeTypeOf(Inspector inspector) { switch (inspector.type()) { case ARRAY: return inspector; case OBJECT: return inspector; case BOOL: return inspector.asBool(); case DATA: return inspector.asData(); case DOUBLE: return inspector.asDouble(); case LONG: return inspector.asLong(); case STRING: return inspector.asString(); case EMPTY : return null; default: throw new IllegalArgumentException("Unexpected Slime type " + inspector.type()); } } }
class GetDocsumsResponseReceiver { private final BlockingQueue<Client.GetDocsumsResponseOrError> responses; private final Compressor compressor; private final Result result; /** Whether we have already logged/notified about an error - to avoid spamming */ private boolean hasReportedError = false; /** The number of responses we should receive (and process) before this is complete */ private int outstandingResponses; public GetDocsumsResponseReceiver(int requestCount, Compressor compressor, Result result) { this.compressor = compressor; responses = new LinkedBlockingQueue<>(requestCount); outstandingResponses = requestCount; this.result = result; } /** Called by a thread belonging to the client when a valid response becomes available */ public void receive(Client.GetDocsumsResponseOrError response) { responses.add(response); } private void throwTimeout() throws TimeoutException { throw new TimeoutException("Timed out waiting for summary data. " + outstandingResponses + " responses outstanding."); } /** * Call this from the dispatcher thread to initiate and complete processing of responses. * This will block until all responses are available and processed, or to timeout. 
*/ public void processResponses(Query query) throws TimeoutException { try { while (outstandingResponses > 0) { long timeLeftMs = query.getTimeLeft(); if (timeLeftMs <= 0) { throwTimeout(); } Client.GetDocsumsResponseOrError response = responses.poll(timeLeftMs, TimeUnit.MILLISECONDS); if (response == null) throwTimeout(); processResponse(response); outstandingResponses--; } } catch (InterruptedException e) { } } private void processResponse(Client.GetDocsumsResponseOrError responseOrError) { if (responseOrError.error().isPresent()) { if (hasReportedError) return; String error = responseOrError.error().get(); result.hits().addError(ErrorMessage.createBackendCommunicationError(error)); log.log(Level.WARNING, "Error fetching summary data: "+ error); } else { Client.GetDocsumsResponse response = responseOrError.response().get(); CompressionType compression = CompressionType.valueOf(response.compression()); byte[] slimeBytes = compressor.decompress(response.compressedSlimeBytes(), compression, response.uncompressedSize()); fill(response.hitsContext(), slimeBytes); } } private void addErrors(com.yahoo.slime.Inspector errors) { errors.traverse((ArrayTraverser) (int index, com.yahoo.slime.Inspector value) -> { int errorCode = ("timeout".equalsIgnoreCase(value.field("type").asString())) ? 
Error.TIMEOUT.code : Error.UNSPECIFIED.code; result.hits().addError(new ErrorMessage(errorCode, value.field("message").asString(), value.field("details").asString())); }); } private void fill(FastHit hit, Inspector summary) { hit.reserve(summary.fieldCount()); summary.traverse((String name, Inspector value) -> { hit.setField(name, nativeTypeOf(value)); }); } private Object nativeTypeOf(Inspector inspector) { switch (inspector.type()) { case ARRAY: return inspector; case OBJECT: return inspector; case BOOL: return inspector.asBool(); case DATA: return inspector.asData(); case DOUBLE: return inspector.asDouble(); case LONG: return inspector.asLong(); case STRING: return inspector.asString(); case EMPTY : return null; default: throw new IllegalArgumentException("Unexpected Slime type " + inspector.type()); } } }
Could consider `String.format` to make string easier to read
protected static Options createOptions() { Options options = new Options(); options.addOption("h", "help", false, "Show this syntax page."); options.addOption(Option.builder("d") .longOpt("datahandler") .hasArg(true) .argName("target") .desc("Send results to the given target.") .build()); options.addOption(Option.builder("s") .longOpt("selection") .hasArg(true) .argName("selection") .desc("What documents to visit.") .build()); options.addOption(Option.builder("f") .longOpt("from") .hasArg(true) .argName("timestamp") .desc("Only visit from the given timestamp (microseconds).") .type(Number.class) .build()); options.addOption(Option.builder("t") .longOpt("to") .hasArg(true) .argName("timestamp") .desc("Only visit up to the given timestamp (microseconds).") .type(Number.class).build()); options.addOption("e", "headersonly", false, "Only visit headers of documents.[Deprecated]"); options.addOption(Option.builder("l") .longOpt("fieldset") .hasArg(true) .argName("fieldset") .desc("Retrieve the specified fields only (see http: .build()); options.addOption(Option.builder() .longOpt("visitinconsistentbuckets") .hasArg(false) .desc("Don't wait for inconsistent buckets to become consistent.") .build()); options.addOption(Option.builder("m") .longOpt("maxpending") .hasArg(true) .argName("num") .desc("Maximum pending messages to data handlers per storage visitor.") .type(Number.class) .build()); options.addOption(Option.builder() .longOpt("maxpendingsuperbuckets") .hasArg(true) .argName("num") .desc("Maximum pending visitor messages from the vespa-visit client. 
If set, dynamic throttling of visitors will be disabled!") .type(Number.class) .build()); options.addOption(Option.builder("b") .longOpt("maxbuckets") .hasArg(true) .argName("num") .desc("Maximum buckets per visitor.") .type(Number.class) .build()); options.addOption("i", "printids", false, "Display only document identifiers."); options.addOption(Option.builder("p") .longOpt("progress") .hasArg(true) .argName("file") .desc("Use given file to track progress.") .build()); options.addOption(Option.builder("o") .longOpt("timeout") .hasArg(true) .argName("milliseconds") .desc("Time out visitor after given time.") .type(Number.class) .build()); options.addOption(Option.builder("u") .longOpt("buckettimeout") .hasArg(true) .argName("milliseconds") .desc("Fail visitor if visiting a single bucket takes longer than this (default same as timeout)") .type(Number.class) .build()); options.addOption(Option.builder() .longOpt("visitlibrary") .hasArg(true) .argName("string") .desc("Use the given visitor library.") .build()); options.addOption(Option.builder() .longOpt("libraryparam") .numberOfArgs(2) .argName("key> <val") .desc("Give the following parameter to the visitor.") .build()); options.addOption("r", "visitremoves", false, "Include information of removed documents."); options.addOption(Option.builder("c") .longOpt("cluster") .hasArg(true) .argName("cluster") .desc("Visit the given cluster.") .build()); options.addOption("v", "verbose", false, "Indent XML, show progress and info on STDERR."); options.addOption(Option.builder() .longOpt("statistics") .hasArg(true) .argName("args") .desc("Use CountVisitor for document statistics. Use comma-separated arguments.") .build()); options.addOption(Option.builder() .longOpt("abortonclusterdown") .hasArg(false) .desc("Abort if cluster is down.") .build()); options.addOption(Option.builder() .longOpt("maxhits") .hasArg(true) .argName("num") .desc("Abort visiting when we have received this many \"first pass\" documents. 
Only appropriate for visiting involving id.order. This is only an approximate number, all pending work will be completed and those documents will also be returned.") .type(Number.class) .build()); options.addOption(Option.builder() .longOpt("maxtotalhits") .hasArg(true) .argName("num") .desc("Abort visiting when we have received this many total documents. This is only an approximate number, all pending work will be completed and those documents will also be returned.") .type(Number.class) .build()); options.addOption(Option.builder() .longOpt("processtime") .hasArg(true) .argName("num") .desc("Sleep this amount of millisecs before processing message. (Debug option for pretending to be slow client)") .type(Number.class) .build()); options.addOption(Option.builder() .longOpt("priority") .hasArg(true) .argName("name") .desc("Priority used for each visitor. Defaults to NORMAL_3. " + "Use with care to avoid starving lower prioritized traffic in the cluster") .build()); options.addOption(Option.builder() .longOpt("ordering") .hasArg(true) .argName("order") .desc("Order to visit documents in. Only makes sense in conjunction with a document selection involving id.order. Legal values are \"ascending\" and \"descending\"") .build()); options.addOption(Option.builder() .longOpt("tracelevel") .hasArg(true) .argName("level") .desc("Tracelevel ([0-9]) to use for debugging purposes") .type(Number.class) .build()); options.addOption(Option.builder() .longOpt("skipbucketsonfatalerrors") .hasArg(false) .desc("Skip visiting super buckets with fatal error codes.") .build()); options.addOption(Option.builder() .longOpt("jsonoutput") .desc("Output documents as JSON") .hasArg(false) .build()); options.addOption(Option.builder() .longOpt("bucketspace") .hasArg(true) .argName("space") .desc("Bucket space to visit ('" + FixedBucketSpaces.defaultSpace() + "' or '" + FixedBucketSpaces.globalSpace() + "'). 
" + "If not specified, '" + FixedBucketSpaces.defaultSpace() + "' is used.") .build()); return options; }
"If not specified, '" + FixedBucketSpaces.defaultSpace() + "' is used.")
protected static Options createOptions() { Options options = new Options(); options.addOption("h", "help", false, "Show this syntax page."); options.addOption(Option.builder("d") .longOpt("datahandler") .hasArg(true) .argName("target") .desc("Send results to the given target.") .build()); options.addOption(Option.builder("s") .longOpt("selection") .hasArg(true) .argName("selection") .desc("What documents to visit.") .build()); options.addOption(Option.builder("f") .longOpt("from") .hasArg(true) .argName("timestamp") .desc("Only visit from the given timestamp (microseconds).") .type(Number.class) .build()); options.addOption(Option.builder("t") .longOpt("to") .hasArg(true) .argName("timestamp") .desc("Only visit up to the given timestamp (microseconds).") .type(Number.class).build()); options.addOption("e", "headersonly", false, "Only visit headers of documents.[Deprecated]"); options.addOption(Option.builder("l") .longOpt("fieldset") .hasArg(true) .argName("fieldset") .desc("Retrieve the specified fields only (see http: .build()); options.addOption(Option.builder() .longOpt("visitinconsistentbuckets") .hasArg(false) .desc("Don't wait for inconsistent buckets to become consistent.") .build()); options.addOption(Option.builder("m") .longOpt("maxpending") .hasArg(true) .argName("num") .desc("Maximum pending messages to data handlers per storage visitor.") .type(Number.class) .build()); options.addOption(Option.builder() .longOpt("maxpendingsuperbuckets") .hasArg(true) .argName("num") .desc("Maximum pending visitor messages from the vespa-visit client. 
If set, dynamic throttling of visitors will be disabled!") .type(Number.class) .build()); options.addOption(Option.builder("b") .longOpt("maxbuckets") .hasArg(true) .argName("num") .desc("Maximum buckets per visitor.") .type(Number.class) .build()); options.addOption("i", "printids", false, "Display only document identifiers."); options.addOption(Option.builder("p") .longOpt("progress") .hasArg(true) .argName("file") .desc("Use given file to track progress.") .build()); options.addOption(Option.builder("o") .longOpt("timeout") .hasArg(true) .argName("milliseconds") .desc("Time out visitor after given time.") .type(Number.class) .build()); options.addOption(Option.builder("u") .longOpt("buckettimeout") .hasArg(true) .argName("milliseconds") .desc("Fail visitor if visiting a single bucket takes longer than this (default same as timeout)") .type(Number.class) .build()); options.addOption(Option.builder() .longOpt("visitlibrary") .hasArg(true) .argName("string") .desc("Use the given visitor library.") .build()); options.addOption(Option.builder() .longOpt("libraryparam") .numberOfArgs(2) .argName("key> <val") .desc("Give the following parameter to the visitor.") .build()); options.addOption("r", "visitremoves", false, "Include information of removed documents."); options.addOption(Option.builder("c") .longOpt("cluster") .hasArg(true) .argName("cluster") .desc("Visit the given cluster.") .build()); options.addOption("v", "verbose", false, "Indent XML, show progress and info on STDERR."); options.addOption(Option.builder() .longOpt("statistics") .hasArg(true) .argName("args") .desc("Use CountVisitor for document statistics. Use comma-separated arguments.") .build()); options.addOption(Option.builder() .longOpt("abortonclusterdown") .hasArg(false) .desc("Abort if cluster is down.") .build()); options.addOption(Option.builder() .longOpt("maxhits") .hasArg(true) .argName("num") .desc("Abort visiting when we have received this many \"first pass\" documents. 
Only appropriate for visiting involving id.order. This is only an approximate number, all pending work will be completed and those documents will also be returned.") .type(Number.class) .build()); options.addOption(Option.builder() .longOpt("maxtotalhits") .hasArg(true) .argName("num") .desc("Abort visiting when we have received this many total documents. This is only an approximate number, all pending work will be completed and those documents will also be returned.") .type(Number.class) .build()); options.addOption(Option.builder() .longOpt("processtime") .hasArg(true) .argName("num") .desc("Sleep this amount of millisecs before processing message. (Debug option for pretending to be slow client)") .type(Number.class) .build()); options.addOption(Option.builder() .longOpt("priority") .hasArg(true) .argName("name") .desc("Priority used for each visitor. Defaults to NORMAL_3. " + "Use with care to avoid starving lower prioritized traffic in the cluster") .build()); options.addOption(Option.builder() .longOpt("ordering") .hasArg(true) .argName("order") .desc("Order to visit documents in. Only makes sense in conjunction with a document selection involving id.order. Legal values are \"ascending\" and \"descending\"") .build()); options.addOption(Option.builder() .longOpt("tracelevel") .hasArg(true) .argName("level") .desc("Tracelevel ([0-9]) to use for debugging purposes") .type(Number.class) .build()); options.addOption(Option.builder() .longOpt("skipbucketsonfatalerrors") .hasArg(false) .desc("Skip visiting super buckets with fatal error codes.") .build()); options.addOption(Option.builder() .longOpt("jsonoutput") .desc("Output documents as JSON") .hasArg(false) .build()); options.addOption(Option.builder() .longOpt("bucketspace") .hasArg(true) .argName("space") .desc(String.format("Bucket space to visit ('%s' or '%s'). 
If not specified, '%s' is used.", FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace(), FixedBucketSpaces.defaultSpace())) .build()); return options; }
class JvmRuntimeShutdownHookRegistrar implements ShutdownHookRegistrar { @Override public void registerShutdownHook(Thread thread) { Runtime.getRuntime().addShutdownHook(thread); } }
class JvmRuntimeShutdownHookRegistrar implements ShutdownHookRegistrar { @Override public void registerShutdownHook(Thread thread) { Runtime.getRuntime().addShutdownHook(thread); } }
Interesting article on splitting empty strings: https://chriszetter.com/blog/2017/10/29/splitting-strings/
public Stream<String> getOutputLinesStream() { if (output.isEmpty()) { return Stream.empty(); } return NEWLINE.splitAsStream(output); }
public Stream<String> getOutputLinesStream() { if (output.isEmpty()) { return Stream.empty(); } return NEWLINE.splitAsStream(output); }
class CommandResult { private static final Pattern NEWLINE = Pattern.compile("\\n"); private final CommandLine commandLine; private final int exitCode; private final String output; CommandResult(CommandLine commandLine, int exitCode, String output) { this.commandLine = commandLine; this.exitCode = exitCode; this.output = output; } public int getExitCode() { return exitCode; } /** Returns the output with leading and trailing white-space removed. */ public String getOutput() { return output.trim(); } public String getUntrimmedOutput() { return output; } /** Returns the output lines of the command, omitting trailing empty lines. */ public List<String> getOutputLines() { return getOutputLinesStream().collect(Collectors.toList()); } /** * Convenience method for getting the CommandLine, whose execution resulted in * this CommandResult instance. * * Warning: the CommandLine is mutable and may be changed by the caller of the execution * through other references! This is just a convenience method for getting that instance. */ public CommandLine getCommandLine() { return commandLine; } }
class CommandResult { private static final Pattern NEWLINE = Pattern.compile("\\n"); private final CommandLine commandLine; private final int exitCode; private final String output; CommandResult(CommandLine commandLine, int exitCode, String output) { this.commandLine = commandLine; this.exitCode = exitCode; this.output = output; } public int getExitCode() { return exitCode; } /** Returns the output with leading and trailing white-space removed. */ public String getOutput() { return output.trim(); } public String getUntrimmedOutput() { return output; } /** Returns the output lines of the command, omitting trailing empty lines. */ public List<String> getOutputLines() { return getOutputLinesStream().collect(Collectors.toList()); } /** * Convenience method for getting the CommandLine, whose execution resulted in * this CommandResult instance. * * Warning: the CommandLine is mutable and may be changed by the caller of the execution * through other references! This is just a convenience method for getting that instance. */ public CommandLine getCommandLine() { return commandLine; } }
How will `close()` be called? Shouldn't `child` be defined within a try-with-resources scope?
public ChildProcess2Impl spawn(CommandLine commandLine) { List<String> arguments = commandLine.getArguments(); if (arguments.isEmpty()) { throw new IllegalArgumentException("No arguments specified - missing program to spawn"); } ProcessBuilder processBuilder = new ProcessBuilder(arguments); if (commandLine.getRedirectStderrToStdoutInsteadOfDiscard()) { processBuilder.redirectErrorStream(true); } else { processBuilder.redirectError(ProcessBuilder.Redirect.to(DEV_NULL)); } String temporaryFilePrefix = ProcessFactoryImpl.class.getSimpleName() + "-" + commandLine.programName() + "-"; FileAttribute<Set<PosixFilePermission>> fileAttribute = PosixFilePermissions.asFileAttribute( PosixFilePermissions.fromString("rw-------")); Path temporaryFile = uncheck(() -> Files.createTempFile( temporaryFilePrefix, ".out", fileAttribute)); try { processBuilder.redirectOutput(temporaryFile.toFile()); ProcessApi2 process = processStarter.start(processBuilder); ChildProcess2Impl child = new ChildProcess2Impl(commandLine, process, temporaryFile, timer); temporaryFile = null; return child; } finally { if (temporaryFile != null) { try { Files.delete(temporaryFile); } catch (IOException e) { logger.log(LogLevel.WARNING, "Failed to delete temporary file at " + temporaryFile, e); } } } }
public ChildProcess2Impl spawn(CommandLine commandLine) { List<String> arguments = commandLine.getArguments(); if (arguments.isEmpty()) { throw new IllegalArgumentException("No arguments specified - missing program to spawn"); } ProcessBuilder processBuilder = new ProcessBuilder(arguments); if (commandLine.getRedirectStderrToStdoutInsteadOfDiscard()) { processBuilder.redirectErrorStream(true); } else { processBuilder.redirectError(ProcessBuilder.Redirect.to(DEV_NULL)); } String temporaryFilePrefix = ProcessFactoryImpl.class.getSimpleName() + "-" + commandLine.programName() + "-"; FileAttribute<Set<PosixFilePermission>> fileAttribute = PosixFilePermissions.asFileAttribute( PosixFilePermissions.fromString("rw-------")); Path temporaryFile = uncheck(() -> Files.createTempFile( temporaryFilePrefix, ".out", fileAttribute)); try { processBuilder.redirectOutput(temporaryFile.toFile()); ProcessApi2 process = processStarter.start(processBuilder); return new ChildProcess2Impl(commandLine, process, temporaryFile, timer); } catch (RuntimeException | Error throwable) { try { Files.delete(temporaryFile); } catch (IOException ioException) { logger.log(LogLevel.WARNING, "Failed to delete temporary file at " + temporaryFile, ioException); } throw throwable; } }
class ProcessFactoryImpl implements ProcessFactory { private static final Logger logger = Logger.getLogger(ProcessFactoryImpl.class.getName()); private static final File DEV_NULL = new File("/dev/null"); private final ProcessStarter processStarter; private final Timer timer; ProcessFactoryImpl(ProcessStarter processStarter, Timer timer) { this.processStarter = processStarter; this.timer = timer; } @Override }
class ProcessFactoryImpl implements ProcessFactory { private static final Logger logger = Logger.getLogger(ProcessFactoryImpl.class.getName()); private static final File DEV_NULL = new File("/dev/null"); private final ProcessStarter processStarter; private final Timer timer; ProcessFactoryImpl(ProcessStarter processStarter, Timer timer) { this.processStarter = processStarter; this.timer = timer; } @Override }
The child is returned immediately, and the caller has to put it in resource scope.
public ChildProcess2Impl spawn(CommandLine commandLine) { List<String> arguments = commandLine.getArguments(); if (arguments.isEmpty()) { throw new IllegalArgumentException("No arguments specified - missing program to spawn"); } ProcessBuilder processBuilder = new ProcessBuilder(arguments); if (commandLine.getRedirectStderrToStdoutInsteadOfDiscard()) { processBuilder.redirectErrorStream(true); } else { processBuilder.redirectError(ProcessBuilder.Redirect.to(DEV_NULL)); } String temporaryFilePrefix = ProcessFactoryImpl.class.getSimpleName() + "-" + commandLine.programName() + "-"; FileAttribute<Set<PosixFilePermission>> fileAttribute = PosixFilePermissions.asFileAttribute( PosixFilePermissions.fromString("rw-------")); Path temporaryFile = uncheck(() -> Files.createTempFile( temporaryFilePrefix, ".out", fileAttribute)); try { processBuilder.redirectOutput(temporaryFile.toFile()); ProcessApi2 process = processStarter.start(processBuilder); ChildProcess2Impl child = new ChildProcess2Impl(commandLine, process, temporaryFile, timer); temporaryFile = null; return child; } finally { if (temporaryFile != null) { try { Files.delete(temporaryFile); } catch (IOException e) { logger.log(LogLevel.WARNING, "Failed to delete temporary file at " + temporaryFile, e); } } } }
public ChildProcess2Impl spawn(CommandLine commandLine) { List<String> arguments = commandLine.getArguments(); if (arguments.isEmpty()) { throw new IllegalArgumentException("No arguments specified - missing program to spawn"); } ProcessBuilder processBuilder = new ProcessBuilder(arguments); if (commandLine.getRedirectStderrToStdoutInsteadOfDiscard()) { processBuilder.redirectErrorStream(true); } else { processBuilder.redirectError(ProcessBuilder.Redirect.to(DEV_NULL)); } String temporaryFilePrefix = ProcessFactoryImpl.class.getSimpleName() + "-" + commandLine.programName() + "-"; FileAttribute<Set<PosixFilePermission>> fileAttribute = PosixFilePermissions.asFileAttribute( PosixFilePermissions.fromString("rw-------")); Path temporaryFile = uncheck(() -> Files.createTempFile( temporaryFilePrefix, ".out", fileAttribute)); try { processBuilder.redirectOutput(temporaryFile.toFile()); ProcessApi2 process = processStarter.start(processBuilder); return new ChildProcess2Impl(commandLine, process, temporaryFile, timer); } catch (RuntimeException | Error throwable) { try { Files.delete(temporaryFile); } catch (IOException ioException) { logger.log(LogLevel.WARNING, "Failed to delete temporary file at " + temporaryFile, ioException); } throw throwable; } }
class ProcessFactoryImpl implements ProcessFactory { private static final Logger logger = Logger.getLogger(ProcessFactoryImpl.class.getName()); private static final File DEV_NULL = new File("/dev/null"); private final ProcessStarter processStarter; private final Timer timer; ProcessFactoryImpl(ProcessStarter processStarter, Timer timer) { this.processStarter = processStarter; this.timer = timer; } @Override }
class ProcessFactoryImpl implements ProcessFactory { private static final Logger logger = Logger.getLogger(ProcessFactoryImpl.class.getName()); private static final File DEV_NULL = new File("/dev/null"); private final ProcessStarter processStarter; private final Timer timer; ProcessFactoryImpl(ProcessStarter processStarter, Timer timer) { this.processStarter = processStarter; this.timer = timer; } @Override }
Yes, let me do that.
public ChildProcess2Impl spawn(CommandLine commandLine) { List<String> arguments = commandLine.getArguments(); if (arguments.isEmpty()) { throw new IllegalArgumentException("No arguments specified - missing program to spawn"); } ProcessBuilder processBuilder = new ProcessBuilder(arguments); if (commandLine.getRedirectStderrToStdoutInsteadOfDiscard()) { processBuilder.redirectErrorStream(true); } else { processBuilder.redirectError(ProcessBuilder.Redirect.to(DEV_NULL)); } String temporaryFilePrefix = ProcessFactoryImpl.class.getSimpleName() + "-" + commandLine.programName() + "-"; FileAttribute<Set<PosixFilePermission>> fileAttribute = PosixFilePermissions.asFileAttribute( PosixFilePermissions.fromString("rw-------")); Path temporaryFile = uncheck(() -> Files.createTempFile( temporaryFilePrefix, ".out", fileAttribute)); try { processBuilder.redirectOutput(temporaryFile.toFile()); ProcessApi2 process = processStarter.start(processBuilder); ChildProcess2Impl child = new ChildProcess2Impl(commandLine, process, temporaryFile, timer); temporaryFile = null; return child; } finally { if (temporaryFile != null) { try { Files.delete(temporaryFile); } catch (IOException e) { logger.log(LogLevel.WARNING, "Failed to delete temporary file at " + temporaryFile, e); } } } }
public ChildProcess2Impl spawn(CommandLine commandLine) { List<String> arguments = commandLine.getArguments(); if (arguments.isEmpty()) { throw new IllegalArgumentException("No arguments specified - missing program to spawn"); } ProcessBuilder processBuilder = new ProcessBuilder(arguments); if (commandLine.getRedirectStderrToStdoutInsteadOfDiscard()) { processBuilder.redirectErrorStream(true); } else { processBuilder.redirectError(ProcessBuilder.Redirect.to(DEV_NULL)); } String temporaryFilePrefix = ProcessFactoryImpl.class.getSimpleName() + "-" + commandLine.programName() + "-"; FileAttribute<Set<PosixFilePermission>> fileAttribute = PosixFilePermissions.asFileAttribute( PosixFilePermissions.fromString("rw-------")); Path temporaryFile = uncheck(() -> Files.createTempFile( temporaryFilePrefix, ".out", fileAttribute)); try { processBuilder.redirectOutput(temporaryFile.toFile()); ProcessApi2 process = processStarter.start(processBuilder); return new ChildProcess2Impl(commandLine, process, temporaryFile, timer); } catch (RuntimeException | Error throwable) { try { Files.delete(temporaryFile); } catch (IOException ioException) { logger.log(LogLevel.WARNING, "Failed to delete temporary file at " + temporaryFile, ioException); } throw throwable; } }
class ProcessFactoryImpl implements ProcessFactory { private static final Logger logger = Logger.getLogger(ProcessFactoryImpl.class.getName()); private static final File DEV_NULL = new File("/dev/null"); private final ProcessStarter processStarter; private final Timer timer; ProcessFactoryImpl(ProcessStarter processStarter, Timer timer) { this.processStarter = processStarter; this.timer = timer; } @Override }
class ProcessFactoryImpl implements ProcessFactory { private static final Logger logger = Logger.getLogger(ProcessFactoryImpl.class.getName()); private static final File DEV_NULL = new File("/dev/null"); private final ProcessStarter processStarter; private final Timer timer; ProcessFactoryImpl(ProcessStarter processStarter, Timer timer) { this.processStarter = processStarter; this.timer = timer; } @Override }
Fixed
public ChildProcess2Impl spawn(CommandLine commandLine) { List<String> arguments = commandLine.getArguments(); if (arguments.isEmpty()) { throw new IllegalArgumentException("No arguments specified - missing program to spawn"); } ProcessBuilder processBuilder = new ProcessBuilder(arguments); if (commandLine.getRedirectStderrToStdoutInsteadOfDiscard()) { processBuilder.redirectErrorStream(true); } else { processBuilder.redirectError(ProcessBuilder.Redirect.to(DEV_NULL)); } String temporaryFilePrefix = ProcessFactoryImpl.class.getSimpleName() + "-" + commandLine.programName() + "-"; FileAttribute<Set<PosixFilePermission>> fileAttribute = PosixFilePermissions.asFileAttribute( PosixFilePermissions.fromString("rw-------")); Path temporaryFile = uncheck(() -> Files.createTempFile( temporaryFilePrefix, ".out", fileAttribute)); try { processBuilder.redirectOutput(temporaryFile.toFile()); ProcessApi2 process = processStarter.start(processBuilder); ChildProcess2Impl child = new ChildProcess2Impl(commandLine, process, temporaryFile, timer); temporaryFile = null; return child; } finally { if (temporaryFile != null) { try { Files.delete(temporaryFile); } catch (IOException e) { logger.log(LogLevel.WARNING, "Failed to delete temporary file at " + temporaryFile, e); } } } }
public ChildProcess2Impl spawn(CommandLine commandLine) { List<String> arguments = commandLine.getArguments(); if (arguments.isEmpty()) { throw new IllegalArgumentException("No arguments specified - missing program to spawn"); } ProcessBuilder processBuilder = new ProcessBuilder(arguments); if (commandLine.getRedirectStderrToStdoutInsteadOfDiscard()) { processBuilder.redirectErrorStream(true); } else { processBuilder.redirectError(ProcessBuilder.Redirect.to(DEV_NULL)); } String temporaryFilePrefix = ProcessFactoryImpl.class.getSimpleName() + "-" + commandLine.programName() + "-"; FileAttribute<Set<PosixFilePermission>> fileAttribute = PosixFilePermissions.asFileAttribute( PosixFilePermissions.fromString("rw-------")); Path temporaryFile = uncheck(() -> Files.createTempFile( temporaryFilePrefix, ".out", fileAttribute)); try { processBuilder.redirectOutput(temporaryFile.toFile()); ProcessApi2 process = processStarter.start(processBuilder); return new ChildProcess2Impl(commandLine, process, temporaryFile, timer); } catch (RuntimeException | Error throwable) { try { Files.delete(temporaryFile); } catch (IOException ioException) { logger.log(LogLevel.WARNING, "Failed to delete temporary file at " + temporaryFile, ioException); } throw throwable; } }
class ProcessFactoryImpl implements ProcessFactory { private static final Logger logger = Logger.getLogger(ProcessFactoryImpl.class.getName()); private static final File DEV_NULL = new File("/dev/null"); private final ProcessStarter processStarter; private final Timer timer; ProcessFactoryImpl(ProcessStarter processStarter, Timer timer) { this.processStarter = processStarter; this.timer = timer; } @Override }
class ProcessFactoryImpl implements ProcessFactory { private static final Logger logger = Logger.getLogger(ProcessFactoryImpl.class.getName()); private static final File DEV_NULL = new File("/dev/null"); private final ProcessStarter processStarter; private final Timer timer; ProcessFactoryImpl(ProcessStarter processStarter, Timer timer) { this.processStarter = processStarter; this.timer = timer; } @Override }
A static import would make this less noisy.
public void watchdog_counts_deactivated_containers() { TestDriver driver = TestDriver.newSimpleApplicationInstanceWithoutOsgi(); ManualClock clock = new ManualClock(Instant.now()); ActiveContainerDeactivationWatchdog watchdog = new ActiveContainerDeactivationWatchdog(clock, Executors.newScheduledThreadPool(1), ActiveContainerDeactivationWatchdog.DEFAULT_DEACTIVATED_CONTAINERS_BEFORE_GC_THRESHOLD); MockMetric metric = new MockMetric(); ActiveContainer containerWithoutRetainedResources = new ActiveContainer(driver.newContainerBuilder()); watchdog.onContainerActivation(containerWithoutRetainedResources); watchdog.emitMetrics(metric); assertEquals(0, metric.totalCount); assertEquals(0, metric.withRetainedReferencesCount); watchdog.onContainerActivation(null); containerWithoutRetainedResources.release(); clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD); watchdog.emitMetrics(metric); assertEquals(0, metric.totalCount); assertEquals(0, metric.withRetainedReferencesCount); clock.advance(Duration.ofSeconds(1)); watchdog.emitMetrics(metric); assertEquals(1, metric.totalCount); assertEquals(0, metric.withRetainedReferencesCount); ActiveContainer containerWithRetainedResources = new ActiveContainer(driver.newContainerBuilder()); try (ResourceReference ignoredRef = containerWithRetainedResources.refer()) { watchdog.onContainerActivation(containerWithRetainedResources); containerWithRetainedResources.release(); watchdog.onContainerActivation(null); clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD.plusSeconds(1)); watchdog.emitMetrics(metric); assertEquals(2, metric.totalCount); assertEquals(1, metric.withRetainedReferencesCount); } }
ActiveContainerDeactivationWatchdog.DEFAULT_DEACTIVATED_CONTAINERS_BEFORE_GC_THRESHOLD);
public void watchdog_counts_deactivated_containers() { TestDriver driver = TestDriver.newSimpleApplicationInstanceWithoutOsgi(); ManualClock clock = new ManualClock(Instant.now()); ActiveContainerDeactivationWatchdog watchdog = new ActiveContainerDeactivationWatchdog(clock, Executors.newScheduledThreadPool(1), ActiveContainerDeactivationWatchdog.DEFAULT_DEACTIVATED_CONTAINERS_BEFORE_GC_THRESHOLD); MockMetric metric = new MockMetric(); ActiveContainer containerWithoutRetainedResources = new ActiveContainer(driver.newContainerBuilder()); watchdog.onContainerActivation(containerWithoutRetainedResources); watchdog.emitMetrics(metric); assertEquals(0, metric.totalCount); assertEquals(0, metric.withRetainedReferencesCount); watchdog.onContainerActivation(null); containerWithoutRetainedResources.release(); clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD); watchdog.emitMetrics(metric); assertEquals(0, metric.totalCount); assertEquals(0, metric.withRetainedReferencesCount); clock.advance(Duration.ofSeconds(1)); watchdog.emitMetrics(metric); assertEquals(1, metric.totalCount); assertEquals(0, metric.withRetainedReferencesCount); ActiveContainer containerWithRetainedResources = new ActiveContainer(driver.newContainerBuilder()); try (ResourceReference ignoredRef = containerWithRetainedResources.refer()) { watchdog.onContainerActivation(containerWithRetainedResources); containerWithRetainedResources.release(); watchdog.onContainerActivation(null); clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD.plusSeconds(1)); watchdog.emitMetrics(metric); assertEquals(2, metric.totalCount); assertEquals(1, metric.withRetainedReferencesCount); } }
class ActiveContainerDeactivationWatchdogTest { @Test @Test @Ignore("Ignored as it assumes phantom references are enqueued right after first GC have cleared the weak reference. " + "This is the case on most JVMs.") public void deactivated_container_destructed_if_its_reference_counter_is_nonzero() { ExecutorMock executor = new ExecutorMock(); ManualClock clock = new ManualClock(Instant.now()); ActiveContainerDeactivationWatchdog watchdog = new ActiveContainerDeactivationWatchdog(clock, executor, /*deactivatedContainersBeforeGcThreshold*/0); ActiveContainer container = new ActiveContainer(TestDriver.newSimpleApplicationInstanceWithoutOsgi().newContainerBuilder()); AtomicBoolean destructed = new AtomicBoolean(false); container.shutdown().notifyTermination(() -> destructed.set(true)); container.refer(); watchdog.onContainerActivation(container); container.release(); watchdog.onContainerActivation(null); WeakReference<ActiveContainer> containerWeakReference = new WeakReference<>(container); container = null; clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD.plusSeconds(1)); executor.triggerGcCommand.run(); assertNull("Container is not GCed - probably because the watchdog has a concrete reference to it", containerWeakReference.get()); executor.enforceDestructionOfGarbageCollectedContainersCommand.run(); assertTrue("Destructor is not called on deactivated container", destructed.get()); } private static class MockMetric implements Metric { public int totalCount; public int withRetainedReferencesCount; @Override public void set(String key, Number val, Context ctx) { switch (key) { case ActiveContainerMetrics.TOTAL_DEACTIVATED_CONTAINERS: totalCount = val.intValue(); break; case ActiveContainerMetrics.DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES: withRetainedReferencesCount = val.intValue(); break; default: throw new UnsupportedOperationException(); } } @Override public void add(String key, Number val, Context ctx) { throw new 
UnsupportedOperationException(); } @Override public Context createContext(Map<String, ?> properties) { throw new UnsupportedOperationException(); } } private static class ExecutorMock extends ScheduledThreadPoolExecutor { public Runnable warnOnStaleContainersCommand; public Runnable triggerGcCommand; public Runnable enforceDestructionOfGarbageCollectedContainersCommand; private int registrationCounter = 0; public ExecutorMock() { super(1); } @Override public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { if (registrationCounter == 0) { warnOnStaleContainersCommand = command; } else if (registrationCounter == 1) { triggerGcCommand = command; } else if (registrationCounter == 2) { enforceDestructionOfGarbageCollectedContainersCommand = command; } else { throw new IllegalStateException("Unexpected registration"); } ++registrationCounter; return null; } } }
class ActiveContainerDeactivationWatchdogTest { @Test @Test @Ignore("Ignored as it assumes phantom references are enqueued right after first GC have cleared the weak reference. " + "This is the case on most JVMs.") public void deactivated_container_destructed_if_its_reference_counter_is_nonzero() { ExecutorMock executor = new ExecutorMock(); ManualClock clock = new ManualClock(Instant.now()); ActiveContainerDeactivationWatchdog watchdog = new ActiveContainerDeactivationWatchdog(clock, executor, /*deactivatedContainersBeforeGcThreshold*/0); ActiveContainer container = new ActiveContainer(TestDriver.newSimpleApplicationInstanceWithoutOsgi().newContainerBuilder()); AtomicBoolean destructed = new AtomicBoolean(false); container.shutdown().notifyTermination(() -> destructed.set(true)); container.refer(); watchdog.onContainerActivation(container); container.release(); watchdog.onContainerActivation(null); WeakReference<ActiveContainer> containerWeakReference = new WeakReference<>(container); container = null; clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD.plusSeconds(1)); executor.triggerGcCommand.run(); assertNull("Container is not GCed - probably because the watchdog has a concrete reference to it", containerWeakReference.get()); executor.enforceDestructionOfGarbageCollectedContainersCommand.run(); assertTrue("Destructor is not called on deactivated container", destructed.get()); } private static class MockMetric implements Metric { public int totalCount; public int withRetainedReferencesCount; @Override public void set(String key, Number val, Context ctx) { switch (key) { case ActiveContainerMetrics.TOTAL_DEACTIVATED_CONTAINERS: totalCount = val.intValue(); break; case ActiveContainerMetrics.DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES: withRetainedReferencesCount = val.intValue(); break; default: throw new UnsupportedOperationException(); } } @Override public void add(String key, Number val, Context ctx) { throw new 
UnsupportedOperationException(); } @Override public Context createContext(Map<String, ?> properties) { throw new UnsupportedOperationException(); } } private static class ExecutorMock extends ScheduledThreadPoolExecutor { public Runnable warnOnStaleContainersCommand; public Runnable triggerGcCommand; public Runnable enforceDestructionOfGarbageCollectedContainersCommand; private int registrationCounter = 0; public ExecutorMock() { super(1); } @Override public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { if (registrationCounter == 0) { warnOnStaleContainersCommand = command; } else if (registrationCounter == 1) { triggerGcCommand = command; } else if (registrationCounter == 2) { enforceDestructionOfGarbageCollectedContainersCommand = command; } else { throw new IllegalStateException("Unexpected registration"); } ++registrationCounter; return null; } } }
static import...
public void deactivated_container_destructed_if_its_reference_counter_is_nonzero() { ExecutorMock executor = new ExecutorMock(); ManualClock clock = new ManualClock(Instant.now()); ActiveContainerDeactivationWatchdog watchdog = new ActiveContainerDeactivationWatchdog(clock, executor, /*deactivatedContainersBeforeGcThreshold*/0); ActiveContainer container = new ActiveContainer(TestDriver.newSimpleApplicationInstanceWithoutOsgi().newContainerBuilder()); AtomicBoolean destructed = new AtomicBoolean(false); container.shutdown().notifyTermination(() -> destructed.set(true)); container.refer(); watchdog.onContainerActivation(container); container.release(); watchdog.onContainerActivation(null); WeakReference<ActiveContainer> containerWeakReference = new WeakReference<>(container); container = null; clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD.plusSeconds(1)); executor.triggerGcCommand.run(); assertNull("Container is not GCed - probably because the watchdog has a concrete reference to it", containerWeakReference.get()); executor.enforceDestructionOfGarbageCollectedContainersCommand.run(); assertTrue("Destructor is not called on deactivated container", destructed.get()); }
clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD.plusSeconds(1));
public void deactivated_container_destructed_if_its_reference_counter_is_nonzero() { ExecutorMock executor = new ExecutorMock(); ManualClock clock = new ManualClock(Instant.now()); ActiveContainerDeactivationWatchdog watchdog = new ActiveContainerDeactivationWatchdog(clock, executor, /*deactivatedContainersBeforeGcThreshold*/0); ActiveContainer container = new ActiveContainer(TestDriver.newSimpleApplicationInstanceWithoutOsgi().newContainerBuilder()); AtomicBoolean destructed = new AtomicBoolean(false); container.shutdown().notifyTermination(() -> destructed.set(true)); container.refer(); watchdog.onContainerActivation(container); container.release(); watchdog.onContainerActivation(null); WeakReference<ActiveContainer> containerWeakReference = new WeakReference<>(container); container = null; clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD.plusSeconds(1)); executor.triggerGcCommand.run(); assertNull("Container is not GCed - probably because the watchdog has a concrete reference to it", containerWeakReference.get()); executor.enforceDestructionOfGarbageCollectedContainersCommand.run(); assertTrue("Destructor is not called on deactivated container", destructed.get()); }
class ActiveContainerDeactivationWatchdogTest { @Test public void watchdog_counts_deactivated_containers() { TestDriver driver = TestDriver.newSimpleApplicationInstanceWithoutOsgi(); ManualClock clock = new ManualClock(Instant.now()); ActiveContainerDeactivationWatchdog watchdog = new ActiveContainerDeactivationWatchdog(clock, Executors.newScheduledThreadPool(1), ActiveContainerDeactivationWatchdog.DEFAULT_DEACTIVATED_CONTAINERS_BEFORE_GC_THRESHOLD); MockMetric metric = new MockMetric(); ActiveContainer containerWithoutRetainedResources = new ActiveContainer(driver.newContainerBuilder()); watchdog.onContainerActivation(containerWithoutRetainedResources); watchdog.emitMetrics(metric); assertEquals(0, metric.totalCount); assertEquals(0, metric.withRetainedReferencesCount); watchdog.onContainerActivation(null); containerWithoutRetainedResources.release(); clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD); watchdog.emitMetrics(metric); assertEquals(0, metric.totalCount); assertEquals(0, metric.withRetainedReferencesCount); clock.advance(Duration.ofSeconds(1)); watchdog.emitMetrics(metric); assertEquals(1, metric.totalCount); assertEquals(0, metric.withRetainedReferencesCount); ActiveContainer containerWithRetainedResources = new ActiveContainer(driver.newContainerBuilder()); try (ResourceReference ignoredRef = containerWithRetainedResources.refer()) { watchdog.onContainerActivation(containerWithRetainedResources); containerWithRetainedResources.release(); watchdog.onContainerActivation(null); clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD.plusSeconds(1)); watchdog.emitMetrics(metric); assertEquals(2, metric.totalCount); assertEquals(1, metric.withRetainedReferencesCount); } } @Test @Ignore("Ignored as it assumes phantom references are enqueued right after first GC have cleared the weak reference. 
" + "This is the case on most JVMs.") private static class MockMetric implements Metric { public int totalCount; public int withRetainedReferencesCount; @Override public void set(String key, Number val, Context ctx) { switch (key) { case ActiveContainerMetrics.TOTAL_DEACTIVATED_CONTAINERS: totalCount = val.intValue(); break; case ActiveContainerMetrics.DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES: withRetainedReferencesCount = val.intValue(); break; default: throw new UnsupportedOperationException(); } } @Override public void add(String key, Number val, Context ctx) { throw new UnsupportedOperationException(); } @Override public Context createContext(Map<String, ?> properties) { throw new UnsupportedOperationException(); } } private static class ExecutorMock extends ScheduledThreadPoolExecutor { public Runnable warnOnStaleContainersCommand; public Runnable triggerGcCommand; public Runnable enforceDestructionOfGarbageCollectedContainersCommand; private int registrationCounter = 0; public ExecutorMock() { super(1); } @Override public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { if (registrationCounter == 0) { warnOnStaleContainersCommand = command; } else if (registrationCounter == 1) { triggerGcCommand = command; } else if (registrationCounter == 2) { enforceDestructionOfGarbageCollectedContainersCommand = command; } else { throw new IllegalStateException("Unexpected registration"); } ++registrationCounter; return null; } } }
class ActiveContainerDeactivationWatchdogTest { @Test public void watchdog_counts_deactivated_containers() { TestDriver driver = TestDriver.newSimpleApplicationInstanceWithoutOsgi(); ManualClock clock = new ManualClock(Instant.now()); ActiveContainerDeactivationWatchdog watchdog = new ActiveContainerDeactivationWatchdog(clock, Executors.newScheduledThreadPool(1), ActiveContainerDeactivationWatchdog.DEFAULT_DEACTIVATED_CONTAINERS_BEFORE_GC_THRESHOLD); MockMetric metric = new MockMetric(); ActiveContainer containerWithoutRetainedResources = new ActiveContainer(driver.newContainerBuilder()); watchdog.onContainerActivation(containerWithoutRetainedResources); watchdog.emitMetrics(metric); assertEquals(0, metric.totalCount); assertEquals(0, metric.withRetainedReferencesCount); watchdog.onContainerActivation(null); containerWithoutRetainedResources.release(); clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD); watchdog.emitMetrics(metric); assertEquals(0, metric.totalCount); assertEquals(0, metric.withRetainedReferencesCount); clock.advance(Duration.ofSeconds(1)); watchdog.emitMetrics(metric); assertEquals(1, metric.totalCount); assertEquals(0, metric.withRetainedReferencesCount); ActiveContainer containerWithRetainedResources = new ActiveContainer(driver.newContainerBuilder()); try (ResourceReference ignoredRef = containerWithRetainedResources.refer()) { watchdog.onContainerActivation(containerWithRetainedResources); containerWithRetainedResources.release(); watchdog.onContainerActivation(null); clock.advance(ActiveContainerDeactivationWatchdog.ACTIVE_CONTAINER_GRACE_PERIOD.plusSeconds(1)); watchdog.emitMetrics(metric); assertEquals(2, metric.totalCount); assertEquals(1, metric.withRetainedReferencesCount); } } @Test @Ignore("Ignored as it assumes phantom references are enqueued right after first GC have cleared the weak reference. 
" + "This is the case on most JVMs.") private static class MockMetric implements Metric { public int totalCount; public int withRetainedReferencesCount; @Override public void set(String key, Number val, Context ctx) { switch (key) { case ActiveContainerMetrics.TOTAL_DEACTIVATED_CONTAINERS: totalCount = val.intValue(); break; case ActiveContainerMetrics.DEACTIVATED_CONTAINERS_WITH_RETAINED_REFERENCES: withRetainedReferencesCount = val.intValue(); break; default: throw new UnsupportedOperationException(); } } @Override public void add(String key, Number val, Context ctx) { throw new UnsupportedOperationException(); } @Override public Context createContext(Map<String, ?> properties) { throw new UnsupportedOperationException(); } } private static class ExecutorMock extends ScheduledThreadPoolExecutor { public Runnable warnOnStaleContainersCommand; public Runnable triggerGcCommand; public Runnable enforceDestructionOfGarbageCollectedContainersCommand; private int registrationCounter = 0; public ExecutorMock() { super(1); } @Override public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { if (registrationCounter == 0) { warnOnStaleContainersCommand = command; } else if (registrationCounter == 1) { triggerGcCommand = command; } else if (registrationCounter == 2) { enforceDestructionOfGarbageCollectedContainersCommand = command; } else { throw new IllegalStateException("Unexpected registration"); } ++registrationCounter; return null; } } }
should there be a try catch finally ?
File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory(tmpDir.toPath(), "archive").toFile(); log.log(LogLevel.DEBUG, "Archived file, unpacking " + inprogressFile + " to " + decompressedDir); CompressedFileReference.decompress(inprogressFile, decompressedDir); Files.delete(inprogressFile.toPath()); moveFileToDestination(decompressedDir, fileReferenceDir); } else { try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed creating directory (" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new RuntimeException("Failed creating directory (" + fileReferenceDir.toPath() + "): ", e); } log.log(LogLevel.DEBUG, "Uncompressed file, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing file: " + e.getMessage(), e); throw new RuntimeException("Failed writing file: ", e); } return file; }
Files.delete(inprogressFile.toPath());
File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory(tmpDir.toPath(), "archive").toFile(); log.log(LogLevel.DEBUG, "Archived file, unpacking " + inprogressFile + " to " + decompressedDir); CompressedFileReference.decompress(inprogressFile, decompressedDir); moveFileToDestination(decompressedDir, fileReferenceDir); } else { try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed creating directory (" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new RuntimeException("Failed creating directory (" + fileReferenceDir.toPath() + "): ", e); } log.log(LogLevel.DEBUG, "Uncompressed file, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing file: " + e.getMessage(), e); throw new RuntimeException("Failed writing file: ", e); } finally { try { Files.delete(inprogressFile.toPath()); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed deleting " + inprogressFile.getAbsolutePath() + ": " + e.getMessage(), e); } } return file; }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private long currentHash; private final File fileReferenceDir; private final File tmpDir; private final File inprogressFile; Session(File downloadDirectory, File tmpDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); this.tmpDir = tmpDirectory; try { inprogressFile = Files.createTempFile(tmpDirectory.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDirectory.toPath() + "': "; log.log(LogLevel.ERROR, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the file from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, StandardOpenOption.APPEND); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing to file(" + inprogressFile.toPath() + "): " + e.getMessage(), e); inprogressFile.delete(); throw new RuntimeException("Failed writing to file(" + 
inprogressFile.toPath() + "): ", e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); } double percentageReceived() { return (double)currentFileSize/(double)fileSize; } }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private long currentHash; private final File fileReferenceDir; private final File tmpDir; private final File inprogressFile; Session(File downloadDirectory, File tmpDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); this.tmpDir = tmpDirectory; try { inprogressFile = Files.createTempFile(tmpDirectory.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDirectory.toPath() + "': "; log.log(LogLevel.ERROR, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the file from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, StandardOpenOption.APPEND); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing to file(" + inprogressFile.toPath() + "): " + e.getMessage(), e); inprogressFile.delete(); throw new RuntimeException("Failed writing to file(" + 
inprogressFile.toPath() + "): ", e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); } double percentageReceived() { return (double)currentFileSize/(double)fileSize; } }
Yes, that's probably better, I'll look at it
File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory(tmpDir.toPath(), "archive").toFile(); log.log(LogLevel.DEBUG, "Archived file, unpacking " + inprogressFile + " to " + decompressedDir); CompressedFileReference.decompress(inprogressFile, decompressedDir); Files.delete(inprogressFile.toPath()); moveFileToDestination(decompressedDir, fileReferenceDir); } else { try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed creating directory (" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new RuntimeException("Failed creating directory (" + fileReferenceDir.toPath() + "): ", e); } log.log(LogLevel.DEBUG, "Uncompressed file, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing file: " + e.getMessage(), e); throw new RuntimeException("Failed writing file: ", e); } return file; }
Files.delete(inprogressFile.toPath());
File close(long hash) { if (hasher.getValue() != hash) { throw new RuntimeException("xxhash from content (" + currentHash + ") is not equal to xxhash in request (" + hash + ")"); } File file = new File(fileReferenceDir, fileName); try { if (fileType == FileReferenceData.Type.compressed) { File decompressedDir = Files.createTempDirectory(tmpDir.toPath(), "archive").toFile(); log.log(LogLevel.DEBUG, "Archived file, unpacking " + inprogressFile + " to " + decompressedDir); CompressedFileReference.decompress(inprogressFile, decompressedDir); moveFileToDestination(decompressedDir, fileReferenceDir); } else { try { Files.createDirectories(fileReferenceDir.toPath()); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed creating directory (" + fileReferenceDir.toPath() + "): " + e.getMessage(), e); throw new RuntimeException("Failed creating directory (" + fileReferenceDir.toPath() + "): ", e); } log.log(LogLevel.DEBUG, "Uncompressed file, moving to " + file.getAbsolutePath()); moveFileToDestination(inprogressFile, file); } } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing file: " + e.getMessage(), e); throw new RuntimeException("Failed writing file: ", e); } finally { try { Files.delete(inprogressFile.toPath()); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed deleting " + inprogressFile.getAbsolutePath() + ": " + e.getMessage(), e); } } return file; }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private long currentHash; private final File fileReferenceDir; private final File tmpDir; private final File inprogressFile; Session(File downloadDirectory, File tmpDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); this.tmpDir = tmpDirectory; try { inprogressFile = Files.createTempFile(tmpDirectory.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDirectory.toPath() + "': "; log.log(LogLevel.ERROR, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the file from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, StandardOpenOption.APPEND); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing to file(" + inprogressFile.toPath() + "): " + e.getMessage(), e); inprogressFile.delete(); throw new RuntimeException("Failed writing to file(" + 
inprogressFile.toPath() + "): ", e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); } double percentageReceived() { return (double)currentFileSize/(double)fileSize; } }
class Session { private final StreamingXXHash64 hasher; private final int sessionId; private final FileReference reference; private final FileReferenceData.Type fileType; private final String fileName; private final long fileSize; private long currentFileSize; private long currentPartId; private long currentHash; private final File fileReferenceDir; private final File tmpDir; private final File inprogressFile; Session(File downloadDirectory, File tmpDirectory, int sessionId, FileReference reference, FileReferenceData.Type fileType, String fileName, long fileSize) { this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0); this.sessionId = sessionId; this.reference = reference; this.fileType = fileType; this.fileName = fileName; this.fileSize = fileSize; currentFileSize = 0; currentPartId = 0; currentHash = 0; fileReferenceDir = new File(downloadDirectory, reference.value()); this.tmpDir = tmpDirectory; try { inprogressFile = Files.createTempFile(tmpDirectory.toPath(), fileName, ".inprogress").toFile(); } catch (IOException e) { String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDirectory.toPath() + "': "; log.log(LogLevel.ERROR, msg + e.getMessage(), e); throw new RuntimeException(msg, e); } } void addPart(int partId, byte [] part) { if (partId != currentPartId) { throw new IllegalStateException("Received partid " + partId + " while expecting " + currentPartId); } if (fileSize < currentFileSize + part.length) { throw new IllegalStateException("Received part would extend the file from " + currentFileSize + " to " + (currentFileSize + part.length) + ", but " + fileSize + " is max."); } try { Files.write(inprogressFile.toPath(), part, StandardOpenOption.WRITE, StandardOpenOption.APPEND); } catch (IOException e) { log.log(LogLevel.ERROR, "Failed writing to file(" + inprogressFile.toPath() + "): " + e.getMessage(), e); inprogressFile.delete(); throw new RuntimeException("Failed writing to file(" + 
inprogressFile.toPath() + "): ", e); } currentFileSize += part.length; currentPartId++; hasher.update(part, 0, part.length); } double percentageReceived() { return (double)currentFileSize/(double)fileSize; } }
what is the point of doing this one extra time here?
public LogHandler[] getLogHandlers() { handlers.toArray(); LogHandler[] h = new LogHandler[handlers.size()]; return handlers.toArray(h); }
handlers.toArray();
public LogHandler[] getLogHandlers() { LogHandler[] h = new LogHandler[handlers.size()]; return handlers.toArray(h); }
class LogDispatcher implements LogHandler, SelectLoopHook { private static final Logger log = Logger.getLogger(LogDispatcher.class.getName()); private final List<LogHandler> handlers = new CopyOnWriteArrayList<>(); private final AtomicInteger messageCount = new AtomicInteger(0); private final AtomicBoolean batchedMode = new AtomicBoolean(false); private final int batchSize = 5000; private final AtomicBoolean hasBeenShutDown = new AtomicBoolean(false); private List<LogMessage> currentBatchList = null; public LogDispatcher() { } /** * Dispatches a message to all the LogHandler instances we've * got registered. The main entry point for LogMessage instances * into the log server. * * @param msg The LogMessage instance we wish to dispatch to the * plugins */ public void handle(LogMessage msg) { if (msg == null) { throw new NullPointerException("LogMessage was null"); } if (batchedMode.get()) { addToBatch(msg); } else { send(msg); } messageCount.incrementAndGet(); } private void addToBatch(LogMessage msg) { List<LogMessage> toSend = null; synchronized (this) { if (currentBatchList == null) { currentBatchList = new ArrayList<LogMessage>(batchSize); currentBatchList.add(msg); return; } currentBatchList.add(msg); if (currentBatchList.size() == batchSize) { toSend = stealBatch(); } } flushBatch(toSend); } private void send(List<LogMessage> messages) { for (LogHandler ht : handlers) { ht.handle(messages); } } private void send(LogMessage message) { for (LogHandler ht : handlers) { ht.handle(message); } } private void flushBatch(List<LogMessage> todo) { if (todo == null) { return; } send(todo); } public void handle(List<LogMessage> messages) { throw new IllegalStateException("method not supported"); } /** * Set the batched mode. Note that this should only be set * at initialization time because it radically changes the * behavior of the dispatcher. When in batched mode, the * dispatcher will not enqueue single LogMessage instances * but lists of same. 
*/ public void setBatchedMode(boolean batchedMode) { this.batchedMode.set(batchedMode); } private List<LogMessage> stealBatch() { List<LogMessage> toSend = null; synchronized (this) { toSend = currentBatchList; currentBatchList = null; } return toSend; } public void flush() { if (batchedMode.get()) { flushBatch(stealBatch()); } for (LogHandler h : handlers) { if (log.isLoggable(LogLevel.DEBUG)) { log.log(LogLevel.DEBUG, "Flushing " + h.toString()); } h.flush(); } } public void close() { if (hasBeenShutDown.getAndSet(true)) { throw new IllegalStateException("Shutdown already in progress"); } for (LogHandler ht : handlers) { if (ht instanceof Thread) { log.fine("Stopping " + ht); ((Thread) ht).interrupt(); } } handlers.clear(); log.log(LogLevel.DEBUG, "Logdispatcher shut down. Handled " + messageCount + " messages"); } /** * Register handler thread with the dispatcher. If the handler * thread has already been registered, we log a warning and * just do nothing. * <p> * If the thread is not alive it will be start()'ed. */ public void registerLogHandler(LogHandler ht) { if (hasBeenShutDown.get()) { throw new IllegalStateException("Tried to register LogHandler on LogDispatcher which was shut down"); } synchronized (this) { if (handlers.contains(ht)) { log.warning("LogHandler was already registered: " + ht); return; } handlers.add(ht); } if ((ht instanceof Thread) && (! ((Thread) ht).isAlive())) { ((Thread) ht).start(); } log.fine("Added (and possibly started) LogHandler " + ht); } /** * Make defensive copy and return array of LogHandlers. */ /** * Return message counter. * * @return Returns the number of messages that we have seen. */ public int getMessageCount() { return messageCount.get(); } /** * Hook which is called when the select loop has finished. */ public void selectLoopHook(boolean before) { if (batchedMode.get()) { flushBatch(stealBatch()); } } }
class LogDispatcher implements LogHandler, SelectLoopHook { private static final Logger log = Logger.getLogger(LogDispatcher.class.getName()); private final List<LogHandler> handlers = new CopyOnWriteArrayList<>(); private final AtomicInteger messageCount = new AtomicInteger(0); private final AtomicBoolean batchedMode = new AtomicBoolean(false); private final int batchSize = 5000; private final AtomicBoolean hasBeenShutDown = new AtomicBoolean(false); private List<LogMessage> currentBatchList = null; public LogDispatcher() { } /** * Dispatches a message to all the LogHandler instances we've * got registered. The main entry point for LogMessage instances * into the log server. * * @param msg The LogMessage instance we wish to dispatch to the * plugins */ public void handle(LogMessage msg) { if (msg == null) { throw new NullPointerException("LogMessage was null"); } if (batchedMode.get()) { addToBatch(msg); } else { send(msg); } messageCount.incrementAndGet(); } private void addToBatch(LogMessage msg) { List<LogMessage> toSend = null; synchronized (this) { if (currentBatchList == null) { currentBatchList = new ArrayList<LogMessage>(batchSize); currentBatchList.add(msg); return; } currentBatchList.add(msg); if (currentBatchList.size() == batchSize) { toSend = stealBatch(); } } flushBatch(toSend); } private void send(List<LogMessage> messages) { for (LogHandler ht : handlers) { ht.handle(messages); } } private void send(LogMessage message) { for (LogHandler ht : handlers) { ht.handle(message); } } private void flushBatch(List<LogMessage> todo) { if (todo == null) { return; } send(todo); } public void handle(List<LogMessage> messages) { throw new IllegalStateException("method not supported"); } /** * Set the batched mode. Note that this should only be set * at initialization time because it radically changes the * behavior of the dispatcher. When in batched mode, the * dispatcher will not enqueue single LogMessage instances * but lists of same. 
*/ public void setBatchedMode(boolean batchedMode) { this.batchedMode.set(batchedMode); } private List<LogMessage> stealBatch() { List<LogMessage> toSend = null; synchronized (this) { toSend = currentBatchList; currentBatchList = null; } return toSend; } public void flush() { if (batchedMode.get()) { flushBatch(stealBatch()); } for (LogHandler h : handlers) { if (log.isLoggable(LogLevel.DEBUG)) { log.log(LogLevel.DEBUG, "Flushing " + h.toString()); } h.flush(); } } public void close() { if (hasBeenShutDown.getAndSet(true)) { throw new IllegalStateException("Shutdown already in progress"); } for (LogHandler ht : handlers) { if (ht instanceof Thread) { log.fine("Stopping " + ht); ((Thread) ht).interrupt(); } } handlers.clear(); log.log(LogLevel.DEBUG, "Logdispatcher shut down. Handled " + messageCount + " messages"); } /** * Register handler thread with the dispatcher. If the handler * thread has already been registered, we log a warning and * just do nothing. * <p> * If the thread is not alive it will be start()'ed. */ public void registerLogHandler(LogHandler ht) { if (hasBeenShutDown.get()) { throw new IllegalStateException("Tried to register LogHandler on LogDispatcher which was shut down"); } synchronized (this) { if (handlers.contains(ht)) { log.warning("LogHandler was already registered: " + ht); return; } handlers.add(ht); } if ((ht instanceof Thread) && (! ((Thread) ht).isAlive())) { ((Thread) ht).start(); } log.fine("Added (and possibly started) LogHandler " + ht); } /** * Make defensive copy and return array of LogHandlers. */ /** * Return message counter. * * @return Returns the number of messages that we have seen. */ public int getMessageCount() { return messageCount.get(); } /** * Hook which is called when the select loop has finished. */ public void selectLoopHook(boolean before) { if (batchedMode.get()) { flushBatch(stealBatch()); } } }
No point at all :) It was me listing the api by using intellij's autocomplete..
public LogHandler[] getLogHandlers() { handlers.toArray(); LogHandler[] h = new LogHandler[handlers.size()]; return handlers.toArray(h); }
handlers.toArray();
public LogHandler[] getLogHandlers() { LogHandler[] h = new LogHandler[handlers.size()]; return handlers.toArray(h); }
class LogDispatcher implements LogHandler, SelectLoopHook { private static final Logger log = Logger.getLogger(LogDispatcher.class.getName()); private final List<LogHandler> handlers = new CopyOnWriteArrayList<>(); private final AtomicInteger messageCount = new AtomicInteger(0); private final AtomicBoolean batchedMode = new AtomicBoolean(false); private final int batchSize = 5000; private final AtomicBoolean hasBeenShutDown = new AtomicBoolean(false); private List<LogMessage> currentBatchList = null; public LogDispatcher() { } /** * Dispatches a message to all the LogHandler instances we've * got registered. The main entry point for LogMessage instances * into the log server. * * @param msg The LogMessage instance we wish to dispatch to the * plugins */ public void handle(LogMessage msg) { if (msg == null) { throw new NullPointerException("LogMessage was null"); } if (batchedMode.get()) { addToBatch(msg); } else { send(msg); } messageCount.incrementAndGet(); } private void addToBatch(LogMessage msg) { List<LogMessage> toSend = null; synchronized (this) { if (currentBatchList == null) { currentBatchList = new ArrayList<LogMessage>(batchSize); currentBatchList.add(msg); return; } currentBatchList.add(msg); if (currentBatchList.size() == batchSize) { toSend = stealBatch(); } } flushBatch(toSend); } private void send(List<LogMessage> messages) { for (LogHandler ht : handlers) { ht.handle(messages); } } private void send(LogMessage message) { for (LogHandler ht : handlers) { ht.handle(message); } } private void flushBatch(List<LogMessage> todo) { if (todo == null) { return; } send(todo); } public void handle(List<LogMessage> messages) { throw new IllegalStateException("method not supported"); } /** * Set the batched mode. Note that this should only be set * at initialization time because it radically changes the * behavior of the dispatcher. When in batched mode, the * dispatcher will not enqueue single LogMessage instances * but lists of same. 
*/ public void setBatchedMode(boolean batchedMode) { this.batchedMode.set(batchedMode); } private List<LogMessage> stealBatch() { List<LogMessage> toSend = null; synchronized (this) { toSend = currentBatchList; currentBatchList = null; } return toSend; } public void flush() { if (batchedMode.get()) { flushBatch(stealBatch()); } for (LogHandler h : handlers) { if (log.isLoggable(LogLevel.DEBUG)) { log.log(LogLevel.DEBUG, "Flushing " + h.toString()); } h.flush(); } } public void close() { if (hasBeenShutDown.getAndSet(true)) { throw new IllegalStateException("Shutdown already in progress"); } for (LogHandler ht : handlers) { if (ht instanceof Thread) { log.fine("Stopping " + ht); ((Thread) ht).interrupt(); } } handlers.clear(); log.log(LogLevel.DEBUG, "Logdispatcher shut down. Handled " + messageCount + " messages"); } /** * Register handler thread with the dispatcher. If the handler * thread has already been registered, we log a warning and * just do nothing. * <p> * If the thread is not alive it will be start()'ed. */ public void registerLogHandler(LogHandler ht) { if (hasBeenShutDown.get()) { throw new IllegalStateException("Tried to register LogHandler on LogDispatcher which was shut down"); } synchronized (this) { if (handlers.contains(ht)) { log.warning("LogHandler was already registered: " + ht); return; } handlers.add(ht); } if ((ht instanceof Thread) && (! ((Thread) ht).isAlive())) { ((Thread) ht).start(); } log.fine("Added (and possibly started) LogHandler " + ht); } /** * Make defensive copy and return array of LogHandlers. */ /** * Return message counter. * * @return Returns the number of messages that we have seen. */ public int getMessageCount() { return messageCount.get(); } /** * Hook which is called when the select loop has finished. */ public void selectLoopHook(boolean before) { if (batchedMode.get()) { flushBatch(stealBatch()); } } }
class LogDispatcher implements LogHandler, SelectLoopHook { private static final Logger log = Logger.getLogger(LogDispatcher.class.getName()); private final List<LogHandler> handlers = new CopyOnWriteArrayList<>(); private final AtomicInteger messageCount = new AtomicInteger(0); private final AtomicBoolean batchedMode = new AtomicBoolean(false); private final int batchSize = 5000; private final AtomicBoolean hasBeenShutDown = new AtomicBoolean(false); private List<LogMessage> currentBatchList = null; public LogDispatcher() { } /** * Dispatches a message to all the LogHandler instances we've * got registered. The main entry point for LogMessage instances * into the log server. * * @param msg The LogMessage instance we wish to dispatch to the * plugins */ public void handle(LogMessage msg) { if (msg == null) { throw new NullPointerException("LogMessage was null"); } if (batchedMode.get()) { addToBatch(msg); } else { send(msg); } messageCount.incrementAndGet(); } private void addToBatch(LogMessage msg) { List<LogMessage> toSend = null; synchronized (this) { if (currentBatchList == null) { currentBatchList = new ArrayList<LogMessage>(batchSize); currentBatchList.add(msg); return; } currentBatchList.add(msg); if (currentBatchList.size() == batchSize) { toSend = stealBatch(); } } flushBatch(toSend); } private void send(List<LogMessage> messages) { for (LogHandler ht : handlers) { ht.handle(messages); } } private void send(LogMessage message) { for (LogHandler ht : handlers) { ht.handle(message); } } private void flushBatch(List<LogMessage> todo) { if (todo == null) { return; } send(todo); } public void handle(List<LogMessage> messages) { throw new IllegalStateException("method not supported"); } /** * Set the batched mode. Note that this should only be set * at initialization time because it radically changes the * behavior of the dispatcher. When in batched mode, the * dispatcher will not enqueue single LogMessage instances * but lists of same. 
*/ public void setBatchedMode(boolean batchedMode) { this.batchedMode.set(batchedMode); } private List<LogMessage> stealBatch() { List<LogMessage> toSend = null; synchronized (this) { toSend = currentBatchList; currentBatchList = null; } return toSend; } public void flush() { if (batchedMode.get()) { flushBatch(stealBatch()); } for (LogHandler h : handlers) { if (log.isLoggable(LogLevel.DEBUG)) { log.log(LogLevel.DEBUG, "Flushing " + h.toString()); } h.flush(); } } public void close() { if (hasBeenShutDown.getAndSet(true)) { throw new IllegalStateException("Shutdown already in progress"); } for (LogHandler ht : handlers) { if (ht instanceof Thread) { log.fine("Stopping " + ht); ((Thread) ht).interrupt(); } } handlers.clear(); log.log(LogLevel.DEBUG, "Logdispatcher shut down. Handled " + messageCount + " messages"); } /** * Register handler thread with the dispatcher. If the handler * thread has already been registered, we log a warning and * just do nothing. * <p> * If the thread is not alive it will be start()'ed. */ public void registerLogHandler(LogHandler ht) { if (hasBeenShutDown.get()) { throw new IllegalStateException("Tried to register LogHandler on LogDispatcher which was shut down"); } synchronized (this) { if (handlers.contains(ht)) { log.warning("LogHandler was already registered: " + ht); return; } handlers.add(ht); } if ((ht instanceof Thread) && (! ((Thread) ht).isAlive())) { ((Thread) ht).start(); } log.fine("Added (and possibly started) LogHandler " + ht); } /** * Make defensive copy and return array of LogHandlers. */ /** * Return message counter. * * @return Returns the number of messages that we have seen. */ public int getMessageCount() { return messageCount.get(); } /** * Hook which is called when the select loop has finished. */ public void selectLoopHook(boolean before) { if (batchedMode.get()) { flushBatch(stealBatch()); } } }
See above comment.
private void downloadFile(Request req) { rpcDownloadExecutor.execute(() -> { FileReference fileReference = new FileReference(req.parameters().get(0).asString()); log.log(LogLevel.DEBUG, "getFile() called for file reference '" + fileReference.value() + "'"); Optional<File> pathToFile = downloader.getFile(fileReference); try { if (pathToFile.isPresent()) { req.returnValues().add(new StringValue(pathToFile.get().getAbsolutePath())); log.log(LogLevel.DEBUG, "File reference '" + fileReference.value() + "' available at " + pathToFile.get()); } else { log.log(LogLevel.INFO, "File reference '" + fileReference.value() + "' not found, returning error"); req.setError(fileReferenceDoesNotExists, "File reference '" + fileReference.value() + "' not found"); } } catch (Throwable e) { log.log(LogLevel.WARNING, "File reference '" + fileReference.value() + "' got exception: " + e.getMessage()); req.setError(fileReferenceInternalError, "File reference '" + fileReference.value() + "' removed"); } req.returnRequest(); }); }
rpcDownloadExecutor.execute(() -> {
private void downloadFile(Request req) { FileReference fileReference = new FileReference(req.parameters().get(0).asString()); log.log(LogLevel.DEBUG, "getFile() called for file reference '" + fileReference.value() + "'"); Optional<File> pathToFile = downloader.getFile(fileReference); try { if (pathToFile.isPresent()) { req.returnValues().add(new StringValue(pathToFile.get().getAbsolutePath())); log.log(LogLevel.DEBUG, "File reference '" + fileReference.value() + "' available at " + pathToFile.get()); } else { log.log(LogLevel.INFO, "File reference '" + fileReference.value() + "' not found, returning error"); req.setError(fileReferenceDoesNotExists, "File reference '" + fileReference.value() + "' not found"); } } catch (Throwable e) { log.log(LogLevel.WARNING, "File reference '" + fileReference.value() + "' got exception: " + e.getMessage()); req.setError(fileReferenceInternalError, "File reference '" + fileReference.value() + "' removed"); } req.returnRequest(); }
class FileDistributionRpcServer { private final static Logger log = Logger.getLogger(FileDistributionRpcServer.class.getName()); private final Supervisor supervisor; private final FileDownloader downloader; private final ExecutorService rpcDownloadExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), new DaemonThreadFactory("Rpc executor")); public FileDistributionRpcServer(Supervisor supervisor, FileDownloader downloader) { this.supervisor = supervisor; this.downloader = downloader; declareFileDistributionMethods(); } private void declareFileDistributionMethods() { supervisor.addMethod(new Method("waitFor", "s", "s", this, "getFile") .methodDesc("get path to file reference") .paramDesc(0, "file reference", "file reference") .returnDesc(0, "path", "path to file")); supervisor.addMethod(new Method("filedistribution.getFile", "s", "s", this, "getFile") .methodDesc("get path to file reference") .paramDesc(0, "file reference", "file reference") .returnDesc(0, "path", "path to file")); supervisor.addMethod(new Method("filedistribution.getActiveFileReferencesStatus", "", "SD", this, "getActiveFileReferencesStatus") .methodDesc("download status for file references") .returnDesc(0, "file references", "array of file references") .returnDesc(1, "download status", "percentage downloaded of each file reference in above array")); supervisor.addMethod(new Method("filedistribution.setFileReferencesToDownload", "S", "i", this, "setFileReferencesToDownload") .methodDesc("set which file references to download") .paramDesc(0, "file references", "file reference to download") .returnDesc(0, "ret", "0 if success, 1 otherwise")); } private static final int baseErrorCode = 0x10000; private static final int baseFileProviderErrorCode = baseErrorCode + 0x1000; private static final int fileReferenceDoesNotExists = baseFileProviderErrorCode; private static final int fileReferenceRemoved = fileReferenceDoesNotExists + 1; private static final int 
fileReferenceInternalError = fileReferenceRemoved + 1; @SuppressWarnings({"UnusedDeclaration"}) public final void getFile(Request req) { req.detach(); downloadFile(req); } @SuppressWarnings({"UnusedDeclaration"}) public final void getActiveFileReferencesStatus(Request req) { Map<FileReference, Double> downloadStatus = downloader.downloadStatus(); String[] fileRefArray = new String[downloadStatus.keySet().size()]; fileRefArray = downloadStatus.keySet().stream() .map(FileReference::value) .collect(Collectors.toList()) .toArray(fileRefArray); double[] downloadStatusArray = new double[downloadStatus.values().size()]; int i = 0; for (Double d : downloadStatus.values()) { downloadStatusArray[i++] = d; } req.returnValues().add(new StringArray(fileRefArray)); req.returnValues().add(new DoubleArray(downloadStatusArray)); } @SuppressWarnings({"UnusedDeclaration"}) public final void setFileReferencesToDownload(Request req) { String[] fileReferenceStrings = req.parameters().get(0).asStringArray(); List<FileReference> fileReferences = Stream.of(fileReferenceStrings) .map(FileReference::new) .collect(Collectors.toList()); downloader.queueForAsyncDownload(fileReferences); req.returnValues().add(new Int32Value(0)); } }
class FileDistributionRpcServer { private final static Logger log = Logger.getLogger(FileDistributionRpcServer.class.getName()); private final Supervisor supervisor; private final FileDownloader downloader; private final ExecutorService rpcDownloadExecutor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), new DaemonThreadFactory("Rpc executor")); public FileDistributionRpcServer(Supervisor supervisor, FileDownloader downloader) { this.supervisor = supervisor; this.downloader = downloader; declareFileDistributionMethods(); } private void declareFileDistributionMethods() { supervisor.addMethod(new Method("waitFor", "s", "s", this, "getFile") .methodDesc("get path to file reference") .paramDesc(0, "file reference", "file reference") .returnDesc(0, "path", "path to file")); supervisor.addMethod(new Method("filedistribution.getFile", "s", "s", this, "getFile") .methodDesc("get path to file reference") .paramDesc(0, "file reference", "file reference") .returnDesc(0, "path", "path to file")); supervisor.addMethod(new Method("filedistribution.getActiveFileReferencesStatus", "", "SD", this, "getActiveFileReferencesStatus") .methodDesc("download status for file references") .returnDesc(0, "file references", "array of file references") .returnDesc(1, "download status", "percentage downloaded of each file reference in above array")); supervisor.addMethod(new Method("filedistribution.setFileReferencesToDownload", "S", "i", this, "setFileReferencesToDownload") .methodDesc("set which file references to download") .paramDesc(0, "file references", "file reference to download") .returnDesc(0, "ret", "0 if success, 1 otherwise")); } private static final int baseErrorCode = 0x10000; private static final int baseFileProviderErrorCode = baseErrorCode + 0x1000; private static final int fileReferenceDoesNotExists = baseFileProviderErrorCode; private static final int fileReferenceRemoved = fileReferenceDoesNotExists + 1; private static final int 
fileReferenceInternalError = fileReferenceRemoved + 1; @SuppressWarnings({"UnusedDeclaration"}) public final void getFile(Request req) { req.detach(); rpcDownloadExecutor.execute(() -> downloadFile(req)); } @SuppressWarnings({"UnusedDeclaration"}) public final void getActiveFileReferencesStatus(Request req) { Map<FileReference, Double> downloadStatus = downloader.downloadStatus(); String[] fileRefArray = new String[downloadStatus.keySet().size()]; fileRefArray = downloadStatus.keySet().stream() .map(FileReference::value) .collect(Collectors.toList()) .toArray(fileRefArray); double[] downloadStatusArray = new double[downloadStatus.values().size()]; int i = 0; for (Double d : downloadStatus.values()) { downloadStatusArray[i++] = d; } req.returnValues().add(new StringArray(fileRefArray)); req.returnValues().add(new DoubleArray(downloadStatusArray)); } @SuppressWarnings({"UnusedDeclaration"}) public final void setFileReferencesToDownload(Request req) { String[] fileReferenceStrings = req.parameters().get(0).asStringArray(); List<FileReference> fileReferences = Stream.of(fileReferenceStrings) .map(FileReference::new) .collect(Collectors.toList()); downloader.queueForAsyncDownload(fileReferences); req.returnValues().add(new Int32Value(0)); } }
this loop also looks a bit iffy (hopefully unneeded)
void setConfigIfChangedIncGen(T config) { ConfigState<T> prev = this.config.get(); while (!this.config.compareAndSet(prev, new ConfigState<>(true, prev.getGeneration() + 1, !config.equals(prev.getConfig()), config))) { prev = this.config.get(); } }
while (!this.config.compareAndSet(prev, new ConfigState<>(true, prev.getGeneration() + 1, !config.equals(prev.getConfig()), config))) {
void setConfigIfChangedIncGen(T config) { ConfigState<T> prev = this.config.get(); this.config.set(new ConfigState<>(true, prev.getGeneration() + 1, !config.equals(prev.getConfig()), config)); }
class of a ConfigsSubscription */ public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber, ConfigSource source, TimingValues timingValues) { String configId = key.getConfigId(); if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source); if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source); if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source); if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source); if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source); if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues); throw new IllegalArgumentException("Unknown source type: "+source); }
class of a ConfigsSubscription */ public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber, ConfigSource source, TimingValues timingValues) { String configId = key.getConfigId(); if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source); if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source); if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source); if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source); if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source); if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues); throw new IllegalArgumentException("Unknown source type: "+source); }
This is a constant that can be pulled out to a static?
public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) .collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNPTed()) { logger.info("Network is macvlan - setting up container with public ip address on a macvlan"); command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } else { logger.info("Network is NPTed - setting up container with private ip address"); command.withIpAddress(NetworkPrefixTranslator.translate( nodeInetAddress, InetAddress.getByName("fd00::"), 64)); command.withNetworkMode("vespa-bridge"); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); 
command.create(); if (isIPv6) { if (!docker.networkNPTed()) { docker.connectContainerToNetwork(containerName, "bridge"); } docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName); } else { docker.startContainer(containerName); } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } }
InetAddress.getByName("fd00::"),
public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) .collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNPTed()) { command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } else { command.withIpAddress(NetworkPrefixTranslator.translate( nodeInetAddress, InetAddress.getByName(LOCAL_IPV6_PREFIX), 64)); command.withNetworkMode(DOCKER_CUSTOM_BRIDGE_NETWORK_NAME); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); command.create(); if (isIPv6) { if (!docker.networkNPTed()) { docker.connectContainerToNetwork(containerName, "bridge"); } 
docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName); } else { docker.startContainer(containerName); } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpu"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zpe"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override private InetAddress toPrivateSubnet(InetAddress nodeInetAddress) { return null; } @Override public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if (existingContainer.state.isRunning()) { logger.info("Stopping container " + 
containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); if (docker.networkNPTed()) { logger.info("Delete iptables NAT rules for " + containerName.asString()); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); String ipv6Str = docker.getGlobalIPv6Address(containerName); String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(drop); if (result.getFirst() != 0) { logger.debug("Unable to drop NAT rule - error message: " + result.getSecond()); } } catch (IOException e) { logger.warning("Unable to drop NAT rule for container " + containerName, e); } } } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. 
*/ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * <p> * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network */ private void setupContainerNetworkConnectivity(ContainerName containerName) throws IOException { if (!docker.networkNPTed()) { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... 
command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } 
@Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final String LOCAL_IPV6_PREFIX = "fd00::"; private static final String DOCKER_CUSTOM_BRIDGE_NETWORK_NAME = "vespa-bridge"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpu"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zpe"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override @Override public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if 
(existingContainer.state.isRunning()) { logger.info("Stopping container " + containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); if (docker.networkNPTed()) { logger.info("Delete iptables NAT rules for " + containerName.asString()); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); String ipv6Str = docker.getGlobalIPv6Address(containerName); String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(drop); if (result.getFirst() != 0) { logger.debug("Unable to drop NAT rule - error message: " + result.getSecond()); } } catch (IOException e) { logger.warning("Unable to drop NAT rule for container " + containerName, e); } } } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. 
*/ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * <p> * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network */ private void setupContainerNetworkConnectivity(ContainerName containerName) throws IOException { if (!docker.networkNPTed()) { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... 
command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } 
@Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } }
the added log messages can be inferred from the logging of the command below?
public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) .collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNPTed()) { logger.info("Network is macvlan - setting up container with public ip address on a macvlan"); command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } else { logger.info("Network is NPTed - setting up container with private ip address"); command.withIpAddress(NetworkPrefixTranslator.translate( nodeInetAddress, InetAddress.getByName("fd00::"), 64)); command.withNetworkMode("vespa-bridge"); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); 
command.create(); if (isIPv6) { if (!docker.networkNPTed()) { docker.connectContainerToNetwork(containerName, "bridge"); } docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName); } else { docker.startContainer(containerName); } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } }
logger.info("Network is NPTed - setting up container with private ip address");
public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) .collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNPTed()) { command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } else { command.withIpAddress(NetworkPrefixTranslator.translate( nodeInetAddress, InetAddress.getByName(LOCAL_IPV6_PREFIX), 64)); command.withNetworkMode(DOCKER_CUSTOM_BRIDGE_NETWORK_NAME); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); command.create(); if (isIPv6) { if (!docker.networkNPTed()) { docker.connectContainerToNetwork(containerName, "bridge"); } 
docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName); } else { docker.startContainer(containerName); } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpu"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zpe"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override private InetAddress toPrivateSubnet(InetAddress nodeInetAddress) { return null; } @Override public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if (existingContainer.state.isRunning()) { logger.info("Stopping container " + 
containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); if (docker.networkNPTed()) { logger.info("Delete iptables NAT rules for " + containerName.asString()); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); String ipv6Str = docker.getGlobalIPv6Address(containerName); String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(drop); if (result.getFirst() != 0) { logger.debug("Unable to drop NAT rule - error message: " + result.getSecond()); } } catch (IOException e) { logger.warning("Unable to drop NAT rule for container " + containerName, e); } } } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. 
*/ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * <p> * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network */ private void setupContainerNetworkConnectivity(ContainerName containerName) throws IOException { if (!docker.networkNPTed()) { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... 
command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } 
@Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final String LOCAL_IPV6_PREFIX = "fd00::"; private static final String DOCKER_CUSTOM_BRIDGE_NETWORK_NAME = "vespa-bridge"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpu"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zpe"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override @Override public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if 
(existingContainer.state.isRunning()) { logger.info("Stopping container " + containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); if (docker.networkNPTed()) { logger.info("Delete iptables NAT rules for " + containerName.asString()); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); String ipv6Str = docker.getGlobalIPv6Address(containerName); String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(drop); if (result.getFirst() != 0) { logger.debug("Unable to drop NAT rule - error message: " + result.getSecond()); } } catch (IOException e) { logger.warning("Unable to drop NAT rule for container " + containerName, e); } } } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. 
*/ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * <p> * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network */ private void setupContainerNetworkConnectivity(ContainerName containerName) throws IOException { if (!docker.networkNPTed()) { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... 
command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } 
@Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } }
moved out the strings static
public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) .collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNPTed()) { logger.info("Network is macvlan - setting up container with public ip address on a macvlan"); command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } else { logger.info("Network is NPTed - setting up container with private ip address"); command.withIpAddress(NetworkPrefixTranslator.translate( nodeInetAddress, InetAddress.getByName("fd00::"), 64)); command.withNetworkMode("vespa-bridge"); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); 
command.create(); if (isIPv6) { if (!docker.networkNPTed()) { docker.connectContainerToNetwork(containerName, "bridge"); } docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName); } else { docker.startContainer(containerName); } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } }
InetAddress.getByName("fd00::"),
public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) .collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNPTed()) { command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } else { command.withIpAddress(NetworkPrefixTranslator.translate( nodeInetAddress, InetAddress.getByName(LOCAL_IPV6_PREFIX), 64)); command.withNetworkMode(DOCKER_CUSTOM_BRIDGE_NETWORK_NAME); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); command.create(); if (isIPv6) { if (!docker.networkNPTed()) { docker.connectContainerToNetwork(containerName, "bridge"); } 
docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName); } else { docker.startContainer(containerName); } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpu"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zpe"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override private InetAddress toPrivateSubnet(InetAddress nodeInetAddress) { return null; } @Override public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if (existingContainer.state.isRunning()) { logger.info("Stopping container " + 
containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); if (docker.networkNPTed()) { logger.info("Delete iptables NAT rules for " + containerName.asString()); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); String ipv6Str = docker.getGlobalIPv6Address(containerName); String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(drop); if (result.getFirst() != 0) { logger.debug("Unable to drop NAT rule - error message: " + result.getSecond()); } } catch (IOException e) { logger.warning("Unable to drop NAT rule for container " + containerName, e); } } } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. 
*/ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * <p> * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network */ private void setupContainerNetworkConnectivity(ContainerName containerName) throws IOException { if (!docker.networkNPTed()) { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... 
command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } 
@Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final String LOCAL_IPV6_PREFIX = "fd00::"; private static final String DOCKER_CUSTOM_BRIDGE_NETWORK_NAME = "vespa-bridge"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpu"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zpe"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override @Override public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if 
(existingContainer.state.isRunning()) { logger.info("Stopping container " + containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); if (docker.networkNPTed()) { logger.info("Delete iptables NAT rules for " + containerName.asString()); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); String ipv6Str = docker.getGlobalIPv6Address(containerName); String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(drop); if (result.getFirst() != 0) { logger.debug("Unable to drop NAT rule - error message: " + result.getSecond()); } } catch (IOException e) { logger.warning("Unable to drop NAT rule for container " + containerName, e); } } } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. 
*/ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * <p> * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network */ private void setupContainerNetworkConnectivity(ContainerName containerName) throws IOException { if (!docker.networkNPTed()) { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... 
command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } 
@Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } }
this probably also has its own race-condition; signalling a new config is available before it actually is...
protected void setNewConfig(JRTClientConfigRequest jrtReq) { setConfig(jrtReq.getNewGeneration(), null ); this.config = RawConfig.createFromResponseParameters(jrtReq); if (log.isLoggable(LogLevel.DEBUG)) { log.log(LogLevel.DEBUG, "in setNewConfig, config=" + this.config); } }
setConfig(jrtReq.getNewGeneration(), null );
protected void setNewConfig(JRTClientConfigRequest jrtReq) { setConfig(jrtReq.getNewGeneration(), RawConfig.createFromResponseParameters(jrtReq) ); if (log.isLoggable(LogLevel.DEBUG)) { log.log(LogLevel.DEBUG, "in setNewConfig, config=" + this.getConfigState().getConfig()); } }
class GenericJRTConfigSubscription extends JRTConfigSubscription { private RawConfig config; private final List<String> defContent; @SuppressWarnings("unchecked") public GenericJRTConfigSubscription(ConfigKey<?> key, List<String> defContent, ConfigSubscriber subscriber, ConfigSource source, TimingValues timingValues) { super(key, subscriber, source, timingValues); this.defContent = defContent; } @Override @SuppressWarnings("unchecked") @Override void setGeneration(Long generation) { super.setGeneration(generation); if (this.config != null) { this.config.setGeneration(generation); } } public RawConfig getRawConfig() { return config; } /** * The config definition schema * * @return the config definition for this subscription */ @Override public DefContent getDefContent() { return (DefContent.fromList(defContent)); } }
class GenericJRTConfigSubscription extends JRTConfigSubscription<RawConfig> { private final List<String> defContent; public GenericJRTConfigSubscription(ConfigKey<RawConfig> key, List<String> defContent, ConfigSubscriber subscriber, ConfigSource source, TimingValues timingValues) { super(key, subscriber, source, timingValues); this.defContent = defContent; } @Override @Override void setGeneration(Long generation) { super.setGeneration(generation); ConfigState<RawConfig> configState = getConfigState(); if (configState.getConfig() != null) { configState.getConfig().setGeneration(generation); } } public RawConfig getRawConfig() { return getConfigState().getConfig(); } /** * The config definition schema * * @return the config definition for this subscription */ @Override public DefContent getDefContent() { return (DefContent.fromList(defContent)); } }
agreed - fixed
public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.info("Starting container " + containerName); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; String configServers = environment.getConfigServerUris().stream() .map(URI::getHost) .collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb), containerName, nodeSpec.hostname) .withManagedBy(MANAGER_NAME) .withEnvironment("CONFIG_SERVER_ADDRESS", configServers) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); if (!docker.networkNPTed()) { logger.info("Network is macvlan - setting up container with public ip address on a macvlan"); command.withIpAddress(nodeInetAddress); command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME); command.withVolume("/etc/hosts", "/etc/hosts"); } else { logger.info("Network is NPTed - setting up container with private ip address"); command.withIpAddress(NetworkPrefixTranslator.translate( nodeInetAddress, InetAddress.getByName("fd00::"), 64)); command.withNetworkMode("vespa-bridge"); } for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString(); command.withVolume(pathInHost, pathInNode); } long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Starting new container with args: " + command); 
command.create(); if (isIPv6) { if (!docker.networkNPTed()) { docker.connectContainerToNetwork(containerName, "bridge"); } docker.startContainer(containerName); setupContainerNetworkConnectivity(containerName); } else { docker.startContainer(containerName); } DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey())); } catch (IOException e) { throw new RuntimeException("Failed to create container " + containerName.asString(), e); } }
logger.info("Network is NPTed - setting up container with private ip address");
/**
 * Creates and starts the Docker container described by {@code nodeSpec}.
 * Order matters: the container is created first, (for macvlan IPv6) attached to the
 * extra bridge network, then started, and only then is post-start network setup run.
 *
 * @param containerName name the container will be created under
 * @param nodeSpec      node wanted-state; wantedDockerImage must be present (get() below)
 * @throws RuntimeException wrapping any IOException from address lookup or network setup
 */
public void startContainer(ContainerName containerName, final ContainerNodeSpec nodeSpec) {
    PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
    logger.info("Starting container " + containerName);
    try {
        InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname);
        final boolean isIPv6 = nodeInetAddress instanceof Inet6Address;

        // Comma-separated config server hostnames, passed into the container via env var.
        String configServers = environment.getConfigServerUris().stream()
                .map(URI::getHost)
                .collect(Collectors.joining(","));
        Docker.CreateContainerCommand command = docker.createContainerCommand(
                nodeSpec.wantedDockerImage.get(), // NOTE(review): assumes wantedDockerImage is present — confirm caller guarantees this
                ContainerResources.from(nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb),
                containerName,
                nodeSpec.hostname)
                .withManagedBy(MANAGER_NAME)
                .withEnvironment("CONFIG_SERVER_ADDRESS", configServers)
                .withUlimit("nofile", 262_144, 262_144)
                // soft nproc limit lower than hard limit
                .withUlimit("nproc", 32_768, 409_600)
                .withUlimit("core", -1, -1) // unlimited core dumps
                .withAddCapability("SYS_PTRACE")
                .withAddCapability("SYS_ADMIN");

        if (!docker.networkNPTed()) {
            // macvlan network: the container gets the node's public address directly.
            command.withIpAddress(nodeInetAddress);
            command.withNetworkMode(DockerImpl.DOCKER_CUSTOM_MACVLAN_NETWORK_NAME);
            command.withVolume("/etc/hosts", "/etc/hosts");
        } else {
            // NPT (network prefix translation): container gets a private address whose
            // suffix is the node address translated onto the local IPv6 prefix.
            command.withIpAddress(NetworkPrefixTranslator.translate(
                    nodeInetAddress,
                    InetAddress.getByName(LOCAL_IPV6_PREFIX),
                    64));
            command.withNetworkMode(DOCKER_CUSTOM_BRIDGE_NETWORK_NAME);
        }

        // Bind-mount every configured directory from the host into the container.
        for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) {
            String pathInHost = environment.pathInHostFromPathInNode(containerName, pathInNode).toString();
            command.withVolume(pathInHost, pathInNode);
        }

        // Tell Vespa inside the container how much memory it may use (MB), if specified.
        long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb * 1024);
        if (minMainMemoryAvailableMb > 0) {
            command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb));
        }

        logger.info("Starting new container with args: " + command);
        command.create();

        if (isIPv6) {
            // On macvlan, also attach the default bridge before starting, then fix up
            // IPv6 routing inside the container's namespace after start.
            if (!docker.networkNPTed()) {
                docker.connectContainerToNetwork(containerName, "bridge");
            }
            docker.startContainer(containerName);
            setupContainerNetworkConnectivity(containerName);
        } else {
            docker.startContainer(containerName);
        }

        // Directories mapped to 'true' are made world-writable inside the container.
        DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry ->
                docker.executeInContainerAsRoot(containerName, "chmod", "-R", "a+w", entry.getKey()));
    } catch (IOException e) {
        throw new RuntimeException("Failed to create container " + containerName.asString(), e);
    }
}
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpu"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zpe"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override private InetAddress toPrivateSubnet(InetAddress nodeInetAddress) { return null; } @Override public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if (existingContainer.state.isRunning()) { logger.info("Stopping container " + 
containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); if (docker.networkNPTed()) { logger.info("Delete iptables NAT rules for " + containerName.asString()); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); String ipv6Str = docker.getGlobalIPv6Address(containerName); String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(drop); if (result.getFirst() != 0) { logger.debug("Unable to drop NAT rule - error message: " + result.getSecond()); } } catch (IOException e) { logger.warning("Unable to drop NAT rule for container " + containerName, e); } } } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. 
*/ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * <p> * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network */ private void setupContainerNetworkConnectivity(ContainerName containerName) throws IOException { if (!docker.networkNPTed()) { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... 
command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } 
@Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } }
class DockerOperationsImpl implements DockerOperations { public static final String NODE_PROGRAM = getDefaults().underVespaHome("bin/vespa-nodectl"); private static final String[] RESUME_NODE_COMMAND = new String[]{NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[]{NODE_PROGRAM, "suspend"}; private static final String[] RESTART_VESPA_ON_NODE_COMMAND = new String[]{NODE_PROGRAM, "restart-vespa"}; private static final String[] STOP_NODE_COMMAND = new String[]{NODE_PROGRAM, "stop"}; private static final String MANAGER_NAME = "node-admin"; private static final String LOCAL_IPV6_PREFIX = "fd00::"; private static final String DOCKER_CUSTOM_BRIDGE_NETWORK_NAME = "vespa-bridge"; private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); static { DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); DIRECTORIES_TO_MOUNT.put("/etc/filebeat", true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/daemontools_y"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/langdetect/"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yck"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yell"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykey"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ykeykeyd"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/yms_agent"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ysar"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/ystatus"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs/zpu"), false); 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), true); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zpe"), false); DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("tmp"), false); } private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override @Override public void removeContainer(final Container existingContainer, ContainerNodeSpec nodeSpec) { final ContainerName containerName = existingContainer.name; PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); if 
(existingContainer.state.isRunning()) { logger.info("Stopping container " + containerName.asString()); docker.stopContainer(containerName); } logger.info("Deleting container " + containerName.asString()); docker.deleteContainer(containerName); if (docker.networkNPTed()) { logger.info("Delete iptables NAT rules for " + containerName.asString()); try { InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); String ipv6Str = docker.getGlobalIPv6Address(containerName); String drop = NATCommand.drop(nodeInetAddress, InetAddress.getByName(ipv6Str)); Pair<Integer, String> result = processExecuter.exec(drop); if (result.getFirst() != 0) { logger.debug("Unable to drop NAT rule - error message: " + result.getSecond()); } } catch (IOException e) { logger.warning("Unable to drop NAT rule for container " + containerName, e); } } } @Override public Optional<Container> getContainer(ContainerName containerName) { return docker.getContainer(containerName); } /** * Try to suspend node. Suspending a node means the node should be taken offline, * such that maintenance can be done of the node (upgrading, rebooting, etc), * and such that we will start serving again as soon as possible afterwards. * <p> * Any failures are logged and ignored. 
*/ @Override public void trySuspendNode(ContainerName containerName) { try { executeCommandInContainer(containerName, SUSPEND_NODE_COMMAND); } catch (RuntimeException e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); logger.warning("Failed trying to suspend container " + containerName.asString() + " with " + Arrays.toString(SUSPEND_NODE_COMMAND), e); } } /** * For macvlan: * <p> * Due to a bug in docker (https: * IPv6 gateway in containers connected to more than one docker network */ private void setupContainerNetworkConnectivity(ContainerName containerName) throws IOException { if (!docker.networkNPTed()) { InetAddress hostDefaultGateway = DockerNetworkCreator.getDefaultGatewayLinux(true); executeCommandInNetworkNamespace(containerName, "route", "-A", "inet6", "add", "default", "gw", hostDefaultGateway.getHostAddress(), "dev", "eth1"); } } @Override public boolean pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } ProcessResult executeCommandInContainer(ContainerName containerName, String... command) { ProcessResult result = docker.executeInContainerAsRoot(containerName, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + containerName.asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, Long timeoutSeconds, String... command) { return docker.executeInContainerAsRoot(containerName, timeoutSeconds, command); } @Override public ProcessResult executeCommandInContainerAsRoot(ContainerName containerName, String... command) { return docker.executeInContainerAsRoot(containerName, command); } @Override public void executeCommandInNetworkNamespace(ContainerName containerName, String... 
command) { final PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("sudo", "nsenter", String.format("--net=/host/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { String msg = String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond()); logger.error(msg); throw new RuntimeException(msg); } } catch (IOException e) { logger.warning(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); throw new RuntimeException(e); } } @Override public void resumeNode(ContainerName containerName) { executeCommandInContainer(containerName, RESUME_NODE_COMMAND); } @Override public void restartVespaOnNode(ContainerName containerName) { executeCommandInContainer(containerName, RESTART_VESPA_ON_NODE_COMMAND); } @Override public void stopServicesOnNode(ContainerName containerName) { executeCommandInContainer(containerName, STOP_NODE_COMMAND); } @Override public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) { return docker.getContainerStats(containerName); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } @Override public List<ContainerName> listAllManagedContainers() { return docker.listAllContainersManagedBy(MANAGER_NAME); } 
@Override public void deleteUnusedDockerImages() { docker.deleteUnusedDockerImages(); } }
Shouldn't we use the generation of the new config instead of incrementing by 1? Config generations are monotonically increasing, but there is no guarantee that they increase by exactly 1. For FileSubscription, incrementing by 1 is fine, so adding a note to that effect on setConfigIncGen() above is OK.
/**
 * Stores the given config, bumping the generation by one and marking the state as
 * changed only if the payload differs from the previously stored config.
 * <p>
 * Note: config generations in general are only guaranteed to be monotonically
 * increasing, not to grow by exactly 1; bumping by 1 is only valid here because this
 * instance is the sole producer of generations (e.g. file-based subscriptions).
 * <p>
 * Single-writer assumption: only one thread ever calls the setters, so the CAS retry
 * loop the previous version used is unnecessary — a plain read-then-set cannot lose a
 * concurrent update under that assumption.
 *
 * @param config the new config payload to store
 */
void setConfigIfChangedIncGen(T config) {
    ConfigState<T> prev = this.config.get();
    // 'changed' flag is set only when the payload actually differs from the previous one.
    this.config.set(new ConfigState<>(true, prev.getGeneration() + 1, !config.equals(prev.getConfig()), config));
}
while (!this.config.compareAndSet(prev, new ConfigState<>(true, prev.getGeneration() + 1, !config.equals(prev.getConfig()), config))) {
/**
 * Stores the given config under the next generation (previous generation + 1),
 * flagging the state as changed only when the payload differs from what was
 * stored before. Assumes a single writer thread, so read-then-set is safe.
 *
 * @param config the new config payload to store
 */
void setConfigIfChangedIncGen(T config) {
    ConfigState<T> current = this.config.get();
    boolean changed = !config.equals(current.getConfig());
    this.config.set(new ConfigState<>(true, current.getGeneration() + 1, changed, config));
}
class of a ConfigsSubscription */
// NOTE(review): the line above is the tail of a javadoc whose opening lies outside this chunk.
/**
 * Factory: returns the ConfigSubscription implementation matching the given source,
 * chosen by the source's concrete type or, failing that, by the configId scheme prefix.
 *
 * @param key          identifies the config (its configId selects a scheme when the source type doesn't)
 * @param subscriber   the subscriber the subscription will belong to
 * @param source       where the config comes from (raw/file/dir/jar/set/source-set)
 * @param timingValues timeouts etc.; only used by the network (JRT) subscription
 * @throws IllegalArgumentException if the source matches no known type
 */
public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber,
                                                                   ConfigSource source, TimingValues timingValues) {
    String configId = key.getConfigId();
    // Dispatch order matters: an explicit source type wins, otherwise the configId prefix decides.
    if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source);
    if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source);
    if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source);
    if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source);
    if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source);
    if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues);
    throw new IllegalArgumentException("Unknown source type: "+source);
}
class of a ConfigsSubscription */
// NOTE(review): the line above is the tail of a javadoc whose opening lies outside this chunk.
/**
 * Factory: returns the ConfigSubscription implementation matching the given source,
 * chosen by the source's concrete type or, failing that, by the configId scheme prefix.
 *
 * @param key          identifies the config (its configId selects a scheme when the source type doesn't)
 * @param subscriber   the subscriber the subscription will belong to
 * @param source       where the config comes from (raw/file/dir/jar/set/source-set)
 * @param timingValues timeouts etc.; only used by the network (JRT) subscription
 * @throws IllegalArgumentException if the source matches no known type
 */
public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber,
                                                                   ConfigSource source, TimingValues timingValues) {
    String configId = key.getConfigId();
    // Dispatch order matters: an explicit source type wins, otherwise the configId prefix decides.
    if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source);
    if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source);
    if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source);
    if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source);
    if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source);
    if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues);
    throw new IllegalArgumentException("Unknown source type: "+source);
}
This loop looks questionable: if the retry is ever actually needed, it means generations are unordered. Maybe just set the value once, as above (assuming a single writer, and letting the reader/flag-clearing light writer back off on a change).
/**
 * Stores the given config unconditionally, bumping the generation by one and marking
 * the state as changed.
 * <p>
 * Note: config generations in general are only guaranteed to be monotonically
 * increasing, not to grow by exactly 1; bumping by 1 is only valid here because this
 * instance is the sole producer of generations (e.g. file-based subscriptions).
 * <p>
 * Single-writer assumption: only one thread ever calls the setters, so the CAS retry
 * loop the previous version used is unnecessary — a plain read-then-set cannot lose a
 * concurrent update under that assumption.
 *
 * @param config the new config payload to store
 */
void setConfigIncGen(T config) {
    ConfigState<T> prev = this.config.get();
    this.config.set(new ConfigState<>(true, prev.getGeneration() + 1, true, config));
}
while ( !this.config.compareAndSet(prev, new ConfigState<>(true, prev.getGeneration()+1, true, config))) {
/**
 * Stores the given config unconditionally under the next generation
 * (previous generation + 1), delegating the actual state update to setConfig().
 *
 * @param config the new config payload to store
 */
void setConfigIncGen(T config) {
    setConfig(this.config.get().getGeneration() + 1, config);
}
class of a ConfigsSubscription */
// NOTE(review): the line above is the tail of a javadoc whose opening lies outside this chunk.
/**
 * Factory: returns the ConfigSubscription implementation matching the given source,
 * chosen by the source's concrete type or, failing that, by the configId scheme prefix.
 *
 * @param key          identifies the config (its configId selects a scheme when the source type doesn't)
 * @param subscriber   the subscriber the subscription will belong to
 * @param source       where the config comes from (raw/file/dir/jar/set/source-set)
 * @param timingValues timeouts etc.; only used by the network (JRT) subscription
 * @throws IllegalArgumentException if the source matches no known type
 */
public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber,
                                                                   ConfigSource source, TimingValues timingValues) {
    String configId = key.getConfigId();
    // Dispatch order matters: an explicit source type wins, otherwise the configId prefix decides.
    if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source);
    if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source);
    if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source);
    if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source);
    if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source);
    if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues);
    throw new IllegalArgumentException("Unknown source type: "+source);
}
class of a ConfigsSubscription */
// NOTE(review): the line above is the tail of a javadoc whose opening lies outside this chunk.
/**
 * Factory: returns the ConfigSubscription implementation matching the given source,
 * chosen by the source's concrete type or, failing that, by the configId scheme prefix.
 *
 * @param key          identifies the config (its configId selects a scheme when the source type doesn't)
 * @param subscriber   the subscriber the subscription will belong to
 * @param source       where the config comes from (raw/file/dir/jar/set/source-set)
 * @param timingValues timeouts etc.; only used by the network (JRT) subscription
 * @throws IllegalArgumentException if the source matches no known type
 */
public static <T extends ConfigInstance> ConfigSubscription<T> get(ConfigKey<T> key, ConfigSubscriber subscriber,
                                                                   ConfigSource source, TimingValues timingValues) {
    String configId = key.getConfigId();
    // Dispatch order matters: an explicit source type wins, otherwise the configId prefix decides.
    if (source instanceof RawSource || configId.startsWith("raw:")) return getRawSub(key, subscriber, source);
    if (source instanceof FileSource || configId.startsWith("file:")) return getFileSub(key, subscriber, source);
    if (source instanceof DirSource || configId.startsWith("dir:")) return getDirFileSub(key, subscriber, source);
    if (source instanceof JarSource || configId.startsWith("jar:")) return getJarSub(key, subscriber, source);
    if (source instanceof ConfigSet) return new ConfigSetSubscription<>(key, subscriber, source);
    if (source instanceof ConfigSourceSet) return new JRTConfigSubscription<>(key, subscriber, source, timingValues);
    throw new IllegalArgumentException("Unknown source type: "+source);
}